List:Commits« Previous MessageNext Message »
From:reggie Date:January 18 2006 3:40pm
Subject:bk commit into 5.1 tree (reggie:1.2017)
View as plain text  
Below is the list of changes that have just been committed into a local
5.1 repository of reggie. When reggie does a push these changes will
be propagated to the main repository and, within 24 hours after the
push, to the public repository.
For information on how to access the public repository
see http://dev.mysql.com/doc/mysql/en/installing-source-tree.html

ChangeSet
  1.2017 06/01/18 09:40:29 reggie@stripped +7 -0
  Merge rburnett@stripped:/home/bk/mysql-5.1-new
  into  linux.site:/home/reggie/work/mysql-5.1-bug15408-2

  sql/sql_partition.cc
    1.20 06/01/18 09:40:19 reggie@stripped +0 -1
    accepting code committed as part of WL 2604.  Will rework to fix this bug

  sql/ha_partition.cc
    1.20 06/01/18 09:40:19 reggie@stripped +4 -6
    accepting code committed as part of WL 2604.  Will rework to fix this bug

  sql/Makefile.am
    1.129 06/01/18 09:40:19 reggie@stripped +1 -2
    merging

  mysql-test/r/partition_mgm_err.result
    1.4 06/01/18 09:40:19 reggie@stripped +0 -0
    merging

  libmysqld/Makefile.am
    1.79 06/01/18 09:40:19 reggie@stripped +1 -3
    merging

  sql/handler.h
    1.182 06/01/18 09:29:45 reggie@stripped +0 -0
    Auto merged

  mysql-test/t/partition_mgm_err.test
    1.4 06/01/18 09:29:45 reggie@stripped +0 -0
    Auto merged

# This is a BitKeeper patch.  What follows are the unified diffs for the
# set of deltas contained in the patch.  The rest of the patch, the part
# that BitKeeper cares about, is below these diffs.
# User:	reggie
# Host:	linux.site
# Root:	/home/reggie/work/mysql-5.1-bug15408-2/RESYNC

--- 1.128/sql/Makefile.am	2006-01-13 10:04:32 -06:00
+++ 1.129/sql/Makefile.am	2006-01-18 09:40:19 -06:00
@@ -101,7 +101,7 @@
 			sp_cache.cc parse_file.cc sql_trigger.cc \
                         event_executor.cc event.cc event_timed.cc \
 			sql_plugin.cc sql_binlog.cc \
-			handlerton.cc sql_tablespace.cc
+			handlerton.cc sql_tablespace.cc partition_info.cc
 EXTRA_mysqld_SOURCES =	ha_innodb.cc ha_berkeley.cc ha_archive.cc \
 			ha_innodb.h  ha_berkeley.h  ha_archive.h \
 			ha_blackhole.cc ha_federated.cc ha_ndbcluster.cc \

--- 1.181/sql/handler.h	2006-01-17 02:24:49 -06:00
+++ 1.182/sql/handler.h	2006-01-18 09:29:45 -06:00
@@ -859,6 +859,10 @@
     subpart_field_list.empty();
   }
   ~partition_info() {}
+
+  bool has_duplicate_names();
+private:
+  bool has_unique_name(partition_element *el_to_check);
 };
 
 

--- 1.78/libmysqld/Makefile.am	2006-01-13 10:09:23 -06:00
+++ 1.79/libmysqld/Makefile.am	2006-01-18 09:40:19 -06:00
@@ -67,7 +67,7 @@
         event_executor.cc event.cc event_timed.cc \
         rpl_filter.cc sql_partition.cc handlerton.cc sql_plugin.cc \
         sql_tablespace.cc \
-        rpl_injector.cc my_user.c
+        rpl_injector.cc my_user.c ../sql/partition_info.cc
 
 libmysqld_int_a_SOURCES= $(libmysqld_sources) $(libmysqlsources) $(sqlsources)
 EXTRA_libmysqld_a_SOURCES =	ha_innodb.cc ha_berkeley.cc ha_archive.cc \

--- 1.19/sql/ha_partition.cc	2006-01-06 12:40:21 -06:00
+++ 1.20/sql/ha_partition.cc	2006-01-18 09:40:19 -06:00
@@ -67,6 +67,8 @@
 ****************************************************************************/
 
 static handler *partition_create_handler(TABLE_SHARE *share);
+static uint partition_flags();
+static uint alter_table_flags(uint flags);
 
 handlerton partition_hton = {
   MYSQL_HANDLERTON_INTERFACE_VERSION,
@@ -96,14 +98,68 @@
   NULL, /* Start Consistent Snapshot */
   NULL, /* Flush logs */
   NULL, /* Show status */
+  partition_flags, /* Partition flags */
+  alter_table_flags, /* Alter table flags */
+  NULL, /* Alter Tablespace */
   HTON_NOT_USER_SELECTABLE | HTON_HIDDEN
 };
 
+/*
+  Create new partition handler
+
+  SYNOPSIS
+    partition_create_handler()
+    table                       Table object
+
+  RETURN VALUE
+    New partition object
+*/
+
 static handler *partition_create_handler(TABLE_SHARE *share)
 {
   return new ha_partition(share);
 }
 
+/*
+  HA_CAN_PARTITION:
+  Used by storage engines that can handle partitioning without this
+  partition handler
+  (Partition, NDB)
+
+  HA_CAN_UPDATE_PARTITION_KEY:
+  Set if the handler can update fields that are part of the partition
+  function.
+
+  HA_CAN_PARTITION_UNIQUE:
+  Set if the handler can handle unique indexes where the fields of the
+  unique key are not part of the fields of the partition function. Thus
+  a unique key can be set on all fields.
+
+  HA_USE_AUTO_PARTITION
+  Set if the handler sets all tables to be partitioned by default.
+*/
+
+static uint partition_flags()
+{
+  return HA_CAN_PARTITION;
+}
+
+static uint alter_table_flags(uint flags __attribute__((unused)))
+{
+  return (HA_PARTITION_FUNCTION_SUPPORTED |
+          HA_FAST_CHANGE_PARTITION);
+}
+
+/*
+  Constructor method
+
+  SYNOPSIS
+    ha_partition()
+    table                       Table object
+
+  RETURN VALUE
+    NONE
+*/
 
 ha_partition::ha_partition(TABLE_SHARE *share)
   :handler(&partition_hton, share), m_part_info(NULL), m_create_handler(FALSE),
@@ -115,6 +171,17 @@
 }
 
 
+/*
+  Constructor method
+
+  SYNOPSIS
+    ha_partition()
+    part_info                       Partition info
+
+  RETURN VALUE
+    NONE
+*/
+
 ha_partition::ha_partition(partition_info *part_info)
   :handler(&partition_hton, NULL), m_part_info(part_info),
    m_create_handler(TRUE),
@@ -128,13 +195,28 @@
 }
 
 
+/*
+  Initialise handler object
+
+  SYNOPSIS
+    init_handler_variables()
+
+  RETURN VALUE
+    NONE
+*/
+
 void ha_partition::init_handler_variables()
 {
   active_index= MAX_KEY;
+  m_mode= 0;
+  m_open_test_lock= 0;
   m_file_buffer= NULL;
   m_name_buffer_ptr= NULL;
   m_engine_array= NULL;
   m_file= NULL;
+  m_reorged_file= NULL;
+  m_reorged_parts= 0;
+  m_added_file= NULL;
   m_tot_parts= 0;
   m_has_transactions= 0;
   m_pkey_is_clustered= 0;
@@ -171,6 +253,16 @@
 }
 
 
+/*
+  Destructor method
+
+  SYNOPSIS
+    ~ha_partition()
+
+  RETURN VALUE
+    NONE
+*/
+
 ha_partition::~ha_partition()
 {
   DBUG_ENTER("ha_partition::~ha_partition()");
@@ -188,6 +280,17 @@
 
 
 /*
+  Initialise partition handler object
+
+  SYNOPSIS
+    ha_initialise()
+
+  RETURN VALUE
+    1                         Error
+    0                         Success
+
+  DESCRIPTION
+
   The partition handler is only a layer on top of other engines. Thus it
   can't really perform anything without the underlying handlers. Thus we
   add this method as part of the allocation of a handler object.
@@ -217,6 +320,7 @@
      sort will be performed using the underlying handlers.
   5) primary_key_is_clustered, has_transactions and low_byte_first is
      calculated here.
+
 */
 
 int ha_partition::ha_initialise()
@@ -243,7 +347,7 @@
   }
   else if (get_from_handler_file(table_share->normalized_path.str))
   {
-    my_error(ER_OUTOFMEMORY, MYF(0), 129); //Temporary fix TODO print_error
+    mem_alloc_error(2);
     DBUG_RETURN(1);
   }
   /*
@@ -288,47 +392,119 @@
                 MODULE meta data changes
 ****************************************************************************/
 /*
-  This method is used to calculate the partition name, service routine to
-  the del_ren_cre_table method.
-*/
+  Create partition names
 
-static void create_partition_name(char *out, const char *in1, const char *in2)
+  SYNOPSIS
+    create_partition_name()
+    out:out                   Created partition name string
+    in1                       First part
+    in2                       Second part
+    name_variant              Normal, temporary or renamed partition name
+
+  RETURN VALUE
+    NONE
+
+  DESCRIPTION
+    This method is used to calculate the partition name, service routine to
+    the del_ren_cre_table method.
+*/
+
+#define NORMAL_PART_NAME 0
+#define TEMP_PART_NAME 1
+#define RENAMED_PART_NAME 2
+static void create_partition_name(char *out, const char *in1,
+                                  const char *in2, uint name_variant,
+                                  bool translate)
 {
-  strxmov(out, in1, "_", in2, NullS);
+  char transl_part_name[FN_REFLEN];
+  const char *transl_part;
+
+  if (translate)
+  {
+    tablename_to_filename(in2, transl_part_name, FN_REFLEN);
+    transl_part= transl_part_name;
+  }
+  else
+    transl_part= in2;
+  if (name_variant == NORMAL_PART_NAME)
+    strxmov(out, in1, "#P#", transl_part, NullS);
+  else if (name_variant == TEMP_PART_NAME)
+    strxmov(out, in1, "#P#", transl_part, "#TMP#", NullS);
+  else if (name_variant == RENAMED_PART_NAME)
+    strxmov(out, in1, "#P#", transl_part, "#REN#", NullS);
 }
 
 /*
-  This method is used to calculate the partition name, service routine to
+  Create subpartition name
+
+  SYNOPSIS
+    create_subpartition_name()
+    out:out                   Created partition name string
+    in1                       First part
+    in2                       Second part
+    in3                       Third part
+    name_variant              Normal, temporary or renamed partition name
+
+  RETURN VALUE
+    NONE
+
+  DESCRIPTION
+  This method is used to calculate the subpartition name, service routine to
   the del_ren_cre_table method.
 */
 
 static void create_subpartition_name(char *out, const char *in1,
-                                     const char *in2, const char *in3)
+                                     const char *in2, const char *in3,
+                                     uint name_variant)
 {
-  strxmov(out, in1, "_", in2, "_", in3, NullS);
+  char transl_part_name[FN_REFLEN], transl_subpart_name[FN_REFLEN];
+
+  tablename_to_filename(in2, transl_part_name, FN_REFLEN);
+  tablename_to_filename(in3, transl_subpart_name, FN_REFLEN);
+  if (name_variant == NORMAL_PART_NAME)
+    strxmov(out, in1, "#P#", transl_part_name,
+            "#SP#", transl_subpart_name, NullS);
+  else if (name_variant == TEMP_PART_NAME)
+    strxmov(out, in1, "#P#", transl_part_name,
+            "#SP#", transl_subpart_name, "#TMP#", NullS);
+  else if (name_variant == RENAMED_PART_NAME)
+    strxmov(out, in1, "#P#", transl_part_name,
+            "#SP#", transl_subpart_name, "#REN#", NullS);
 }
 
 
 /*
-  Used to delete a table. By the time delete_table() has been called all
-  opened references to this table will have been closed (and your globally
-  shared references released. The variable name will just be the name of
-  the table. You will need to remove any files you have created at this
-  point.
+  Delete a table
 
-  If you do not implement this, the default delete_table() is called from
-  handler.cc and it will delete all files with the file extentions returned
-  by bas_ext().
+  SYNOPSIS
+    delete_table()
+    name                    Full path of table name
 
-  Called from handler.cc by delete_table and  ha_create_table(). Only used
-  during create if the table_flag HA_DROP_BEFORE_CREATE was specified for
-  the storage engine.
+  RETURN VALUE
+    >0                        Error
+    0                         Success
+
+  DESCRIPTION
+    Used to delete a table. By the time delete_table() has been called all
+    opened references to this table will have been closed (and your globally
+    shared references released. The variable name will just be the name of
+    the table. You will need to remove any files you have created at this
+    point.
+
+    If you do not implement this, the default delete_table() is called from
+    handler.cc and it will delete all files with the file extensions returned
+    by bas_ext().
+
+    Called from handler.cc by delete_table and  ha_create_table(). Only used
+    during create if the table_flag HA_DROP_BEFORE_CREATE was specified for
+    the storage engine.
 */
 
 int ha_partition::delete_table(const char *name)
 {
   int error;
   DBUG_ENTER("ha_partition::delete_table");
+
   if ((error= del_ren_cre_table(name, NULL, NULL, NULL)))
     DBUG_RETURN(error);
   DBUG_RETURN(handler::delete_table(name));
@@ -336,19 +512,32 @@
 
 
 /*
-  Renames a table from one name to another from alter table call.
+  Rename a table
 
-  If you do not implement this, the default rename_table() is called from
-  handler.cc and it will delete all files with the file extentions returned
-  by bas_ext().
+  SYNOPSIS
+    rename_table()
+    from                      Full path of old table name
+    to                        Full path of new table name
+
+  RETURN VALUE
+    >0                        Error
+    0                         Success
+
+  DESCRIPTION
+    Renames a table from one name to another from alter table call.
+
+    If you do not implement this, the default rename_table() is called from
+    handler.cc and it will rename all files with the file extensions returned
+    by bas_ext().
 
-  Called from sql_table.cc by mysql_rename_table().
+    Called from sql_table.cc by mysql_rename_table().
 */
 
 int ha_partition::rename_table(const char *from, const char *to)
 {
   int error;
   DBUG_ENTER("ha_partition::rename_table");
+
   if ((error= del_ren_cre_table(from, to, NULL, NULL)))
     DBUG_RETURN(error);
   DBUG_RETURN(handler::rename_table(from, to));
@@ -356,11 +545,22 @@
 
 
 /*
-  create_handler_files is called to create any handler specific files
-  before opening the file with openfrm to later call ::create on the
-  file object.
-  In the partition handler this is used to store the names of partitions
-  and types of engines in the partitions.
+  Create the handler file (.par-file)
+
+  SYNOPSIS
+    create_handler_files()
+    name                              Full path of table name
+
+  RETURN VALUE
+    >0                        Error
+    0                         Success
+
+  DESCRIPTION
+    create_handler_files is called to create any handler specific files
+    before opening the file with openfrm to later call ::create on the
+    file object.
+    In the partition handler this is used to store the names of partitions
+    and types of engines in the partitions.
 */
 
 int ha_partition::create_handler_files(const char *name)
@@ -371,7 +571,6 @@
     We need to update total number of parts since we might write the handler
     file as part of a partition management command
   */
-  m_tot_parts= get_tot_partitions(m_part_info);
   if (create_handler_file(name))
   {
     my_error(ER_CANT_CREATE_HANDLER_FILE, MYF(0));
@@ -382,14 +581,27 @@
 
 
 /*
-  create() is called to create a table. The variable name will have the name
-  of the table. When create() is called you do not need to worry about
-  opening the table. Also, the FRM file will have already been created so
-  adjusting create_info will not do you any good. You can overwrite the frm
-  file at this point if you wish to change the table definition, but there
-  are no methods currently provided for doing that.
+  Create a partitioned table
 
-  Called from handle.cc by ha_create_table().
+  SYNOPSIS
+    create()
+    name                              Full path of table name
+    table_arg                         Table object
+    create_info                       Create info generated for CREATE TABLE
+
+  RETURN VALUE
+    >0                        Error
+    0                         Success
+
+  DESCRIPTION
+    create() is called to create a table. The variable name will have the name
+    of the table. When create() is called you do not need to worry about
+    opening the table. Also, the FRM file will have already been created so
+    adjusting create_info will not do you any good. You can overwrite the frm
+    file at this point if you wish to change the table definition, but there
+    are no methods currently provided for doing that.
+
+    Called from handler.cc by ha_create_table().
 */
 
 int ha_partition::create(const char *name, TABLE *table_arg,
@@ -409,23 +621,838 @@
   DBUG_RETURN(0);
 }
 
+
+/*
+  Drop partitions as part of ALTER TABLE of partitions
+
+  SYNOPSIS
+    drop_partitions()
+    path                        Complete path of db and table name
+
+  RETURN VALUE
+    >0                          Failure
+    0                           Success
+
+  DESCRIPTION
+    Use part_info object on handler object to deduce which partitions to
+    drop (each partition has a state attached to it)
+*/
+
 int ha_partition::drop_partitions(const char *path)
 {
   List_iterator<partition_element> part_it(m_part_info->partitions);
+  List_iterator<partition_element> temp_it(m_part_info->temp_partitions);
+  char part_name_buff[FN_REFLEN];
+  uint no_parts= m_part_info->partitions.elements;
+  uint part_count= 0;
+  uint no_subparts= m_part_info->no_subparts;
+  uint i= 0;
+  uint name_variant;
+  int  error= 1;
+  bool reorged_parts= (m_reorged_parts > 0);
+  bool temp_partitions= (m_part_info->temp_partitions.elements > 0);
+  DBUG_ENTER("ha_partition::drop_partitions");
+
+  if (temp_partitions)
+    no_parts= m_part_info->temp_partitions.elements;
+  do
+  {
+    partition_element *part_elem;
+    if (temp_partitions)
+    {
+      /*
+        We need to remove the reorganised partitions that were put in the
+        temp_partitions-list.
+      */
+      part_elem= temp_it++;
+      DBUG_ASSERT(part_elem->part_state == PART_TO_BE_DROPPED);
+    }
+    else
+      part_elem= part_it++;
+    if (part_elem->part_state == PART_TO_BE_DROPPED ||
+        part_elem->part_state == PART_IS_CHANGED)
+    {
+      handler *file;
+      /*
+        This part is to be dropped, meaning the part or all its subparts.
+      */
+      name_variant= NORMAL_PART_NAME;
+      if (part_elem->part_state == PART_IS_CHANGED ||
+          (part_elem->part_state == PART_TO_BE_DROPPED && temp_partitions))
+        name_variant= RENAMED_PART_NAME;
+      if (m_is_sub_partitioned)
+      {
+        List_iterator<partition_element> sub_it(part_elem->subpartitions);
+        uint j= 0, part;
+        do
+        {
+          partition_element *sub_elem= sub_it++;
+          part= i * no_subparts + j;
+          create_subpartition_name(part_name_buff, path,
+                                   part_elem->partition_name,
+                                   sub_elem->partition_name, name_variant);
+          if (reorged_parts)
+            file= m_reorged_file[part_count++];
+          else
+            file= m_file[part];
+          DBUG_PRINT("info", ("Drop subpartition %s", part_name_buff));
+          error= file->delete_table((const char *) part_name_buff);
+        } while (++j < no_subparts);
+      }
+      else
+      {
+        create_partition_name(part_name_buff, path,
+                              part_elem->partition_name, name_variant,
+                              TRUE);
+        if (reorged_parts)
+          file= m_reorged_file[part_count++];
+        else
+          file= m_file[i];
+        DBUG_PRINT("info", ("Drop partition %s", part_name_buff));
+        error= file->delete_table((const char *) part_name_buff);
+      }
+      if (part_elem->part_state == PART_IS_CHANGED)
+        part_elem->part_state= PART_NORMAL;
+      else
+        part_elem->part_state= PART_IS_DROPPED;
+    }
+  } while (++i < no_parts);
+  DBUG_RETURN(error);
+}
+
+
+/*
+  Rename partitions as part of ALTER TABLE of partitions
+
+  SYNOPSIS
+    rename_partitions()
+    path                        Complete path of db and table name
+
+  RETURN VALUE
+    TRUE                        Failure
+    FALSE                       Success
+
+  DESCRIPTION
+    When reorganising partitions, adding hash partitions and coalescing
+    partitions it can be necessary to rename partitions while holding
+    an exclusive lock on the table.
+    Which partitions to rename is given by state of partitions found by the
+    partition info struct referenced from the handler object
+*/
+
+int ha_partition::rename_partitions(const char *path)
+{
+  List_iterator<partition_element> part_it(m_part_info->partitions);
+  List_iterator<partition_element> temp_it(m_part_info->temp_partitions);
   char part_name_buff[FN_REFLEN];
+  char norm_name_buff[FN_REFLEN];
+  uint no_parts= m_part_info->partitions.elements;
+  uint part_count= 0;
+  uint no_subparts= m_part_info->no_subparts;
+  uint i= 0;
+  uint j= 0;
+  int error= 1;
+  uint temp_partitions= m_part_info->temp_partitions.elements;
+  handler *file;
+  partition_element *part_elem, *sub_elem;
+  DBUG_ENTER("ha_partition::rename_partitions");
+
+  if (temp_partitions)
+  {
+    do
+    {
+      part_elem= temp_it++;
+      if (m_is_sub_partitioned)
+      {
+        List_iterator<partition_element> sub_it(part_elem->subpartitions);
+        do
+        {
+          sub_elem= sub_it++;
+          file= m_reorged_file[part_count++];
+          create_subpartition_name(part_name_buff, path,
+                                   part_elem->partition_name,
+                                   sub_elem->partition_name,
+                                   RENAMED_PART_NAME);
+          create_subpartition_name(norm_name_buff, path,
+                                   part_elem->partition_name,
+                                   sub_elem->partition_name,
+                                   NORMAL_PART_NAME);
+          DBUG_PRINT("info", ("Rename subpartition from %s to %s",
+                     norm_name_buff, part_name_buff));
+          error= file->rename_table((const char *) norm_name_buff,
+                                    (const char *) part_name_buff);
+        } while (++j < no_subparts);
+      }
+      else
+      {
+        file= m_reorged_file[part_count++];
+        create_partition_name(part_name_buff, path,
+                              part_elem->partition_name, RENAMED_PART_NAME,
+                              TRUE);
+        create_partition_name(norm_name_buff, path,
+                              part_elem->partition_name, NORMAL_PART_NAME,
+                              TRUE);
+        DBUG_PRINT("info", ("Rename partition from %s to %s",
+                   norm_name_buff, part_name_buff));
+        error= file->rename_table((const char *) norm_name_buff,
+                                  (const char *) part_name_buff);
+      }
+    } while (++i < temp_partitions);
+  }
+  i= 0;
+  do
+  {
+    part_elem= part_it++;
+    if (part_elem->part_state == PART_IS_CHANGED ||
+        (part_elem->part_state == PART_IS_ADDED && temp_partitions))
+    {
+      if (m_is_sub_partitioned)
+      {
+        List_iterator<partition_element> sub_it(part_elem->subpartitions);
+        uint part;
+
+        j= 0;
+        do
+        {
+          sub_elem= sub_it++;
+          part= i * no_subparts + j;
+          create_subpartition_name(norm_name_buff, path,
+                                   part_elem->partition_name,
+                                   sub_elem->partition_name,
+                                   NORMAL_PART_NAME);
+          if (part_elem->part_state == PART_IS_CHANGED)
+          {
+            file= m_reorged_file[part_count++];
+            create_subpartition_name(part_name_buff, path,
+                                     part_elem->partition_name,
+                                     sub_elem->partition_name,
+                                     RENAMED_PART_NAME);
+            DBUG_PRINT("info", ("Rename subpartition from %s to %s",
+                       norm_name_buff, part_name_buff));
+            error= file->rename_table((const char *) norm_name_buff,
+                                      (const char *) part_name_buff);
+          }
+          file= m_new_file[part];
+          create_subpartition_name(part_name_buff, path,
+                                   part_elem->partition_name,
+                                   sub_elem->partition_name,
+                                   TEMP_PART_NAME);
+          DBUG_PRINT("info", ("Rename subpartition from %s to %s",
+                     part_name_buff, norm_name_buff));
+          error= file->rename_table((const char *) part_name_buff,
+                                    (const char *) norm_name_buff);
+        } while (++j < no_subparts);
+      }
+      else
+      {
+        create_partition_name(norm_name_buff, path,
+                              part_elem->partition_name, NORMAL_PART_NAME,
+                              TRUE);
+        if (part_elem->part_state == PART_IS_CHANGED)
+        {
+          file= m_reorged_file[part_count++];
+          create_partition_name(part_name_buff, path,
+                                part_elem->partition_name, RENAMED_PART_NAME,
+                                TRUE);
+          DBUG_PRINT("info", ("Rename partition from %s to %s",
+                     norm_name_buff, part_name_buff));
+          error= file->rename_table((const char *) norm_name_buff,
+                                    (const char *) part_name_buff);
+        }
+        file= m_new_file[i];
+        create_partition_name(part_name_buff, path,
+                              part_elem->partition_name, TEMP_PART_NAME,
+                              TRUE);
+        DBUG_PRINT("info", ("Rename partition from %s to %s",
+                   part_name_buff, norm_name_buff));
+        error= file->rename_table((const char *) part_name_buff,
+                                  (const char *) norm_name_buff);
+      }
+    }
+  } while (++i < no_parts);
+  DBUG_RETURN(error);
+}
+
+
+#define OPTIMIZE_PARTS 1
+#define ANALYZE_PARTS 2
+#define CHECK_PARTS   3
+#define REPAIR_PARTS 4
+
+/*
+  Optimize table
+
+  SYNOPSIS
+    optimize()
+    thd               Thread object
+    check_opt         Check/analyze/repair/optimize options
+
+  RETURN VALUES
+    >0                Error
+    0                 Success
+*/
+
+int ha_partition::optimize(THD *thd, HA_CHECK_OPT *check_opt)
+{
+  DBUG_ENTER("ha_partition::optimize");
+
+  DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt, 
+                                    OPTIMIZE_PARTS, TRUE));
+}
+
+
+/*
+  Analyze table
+
+  SYNOPSIS
+    analyze()
+    thd               Thread object
+    check_opt         Check/analyze/repair/optimize options
+
+  RETURN VALUES
+    >0                Error
+    0                 Success
+*/
+
+int ha_partition::analyze(THD *thd, HA_CHECK_OPT *check_opt)
+{
+  DBUG_ENTER("ha_partition::analyze");
+
+  DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt, 
+                                    ANALYZE_PARTS, TRUE));
+}
+
+
+/*
+  Check table
+
+  SYNOPSIS
+    check()
+    thd               Thread object
+    check_opt         Check/analyze/repair/optimize options
+
+  RETURN VALUES
+    >0                Error
+    0                 Success
+*/
+
+int ha_partition::check(THD *thd, HA_CHECK_OPT *check_opt)
+{
+  DBUG_ENTER("ha_partition::check");
+
+  DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt, 
+                                    CHECK_PARTS, TRUE));
+}
+
+
+/*
+  Repair table
+
+  SYNOPSIS
+    repair()
+    thd               Thread object
+    check_opt         Check/analyze/repair/optimize options
+
+  RETURN VALUES
+    >0                Error
+    0                 Success
+*/
+
+int ha_partition::repair(THD *thd, HA_CHECK_OPT *check_opt)
+{
+  DBUG_ENTER("ha_partition::repair");
+
+  DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt, 
+                                    REPAIR_PARTS, TRUE));
+}
+
+/*
+  Optimize partitions
+
+  SYNOPSIS
+    optimize_partitions()
+    thd                   Thread object
+  RETURN VALUE
+    >0                        Failure
+    0                         Success
+  DESCRIPTION
+    Call optimize on each partition marked with partition state PART_CHANGED
+*/
+
+int ha_partition::optimize_partitions(THD *thd)
+{
+  DBUG_ENTER("ha_partition::optimize_partitions");
+
+  DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt, 
+                                    OPTIMIZE_PARTS, FALSE));
+}
+
+/*
+  Analyze partitions
+
+  SYNOPSIS
+    analyze_partitions()
+    thd                   Thread object
+  RETURN VALUE
+    >0                        Failure
+    0                         Success
+  DESCRIPTION
+    Call analyze on each partition marked with partition state PART_CHANGED
+*/
+
+int ha_partition::analyze_partitions(THD *thd)
+{
+  DBUG_ENTER("ha_partition::analyze_partitions");
+
+  DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt, 
+                                    ANALYZE_PARTS, FALSE));
+}
+
+/*
+  Check partitions
+
+  SYNOPSIS
+    check_partitions()
+    thd                   Thread object
+  RETURN VALUE
+    >0                        Failure
+    0                         Success
+  DESCRIPTION
+    Call check on each partition marked with partition state PART_CHANGED
+*/
+
+int ha_partition::check_partitions(THD *thd)
+{
+  DBUG_ENTER("ha_partition::check_partitions");
+
+  DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt, 
+                                    CHECK_PARTS, FALSE));
+}
+
+/*
+  Repair partitions
+
+  SYNOPSIS
+    repair_partitions()
+    thd                   Thread object
+  RETURN VALUE
+    >0                        Failure
+    0                         Success
+  DESCRIPTION
+    Call repair on each partition marked with partition state PART_CHANGED
+*/
+
+int ha_partition::repair_partitions(THD *thd)
+{
+  DBUG_ENTER("ha_partition::repair_partitions");
+
+  DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt, 
+                                    REPAIR_PARTS, FALSE));
+}
+
+
+/*
+  Handle optimize/analyze/check/repair of one partition
+
+  SYNOPSIS
+    handle_opt_part()
+    thd                      Thread object
+    check_opt                Options
+    file                     Handler object of partition
+    flag                     Optimize/Analyze/Check/Repair flag
+
+  RETURN VALUE
+    >0                        Failure
+    0                         Success
+*/
+
+static int handle_opt_part(THD *thd, HA_CHECK_OPT *check_opt,
+                           handler *file, uint flag)
+{
+  int error;
+  DBUG_ENTER("handle_opt_part");
+  DBUG_PRINT("enter", ("flag = %u", flag));
+
+  if (flag == OPTIMIZE_PARTS)
+    error= file->optimize(thd, check_opt);
+  else if (flag == ANALYZE_PARTS)
+    error= file->analyze(thd, check_opt);
+  else if (flag == CHECK_PARTS)
+    error= file->check(thd, check_opt);
+  else if (flag == REPAIR_PARTS)
+    error= file->repair(thd, check_opt);
+  else
+  {
+    DBUG_ASSERT(FALSE);
+    error= 1;
+  }
+  if (error == HA_ADMIN_ALREADY_DONE)
+    error= 0;
+  DBUG_RETURN(error);
+}
+
+
+/*
+  Handle optimize/analyze/check/repair of partitions
+
+  SYNOPSIS
+    handle_opt_partitions()
+    thd                      Thread object
+    check_opt                Options
+    flag                     Optimize/Analyze/Check/Repair flag
+    all_parts                All partitions or only a subset
+
+  RETURN VALUE
+    >0                        Failure
+    0                         Success
+*/
+
+int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt,
+                                        uint flag, bool all_parts)
+{
+  List_iterator<partition_element> part_it(m_part_info->partitions);
   uint no_parts= m_part_info->no_parts;
-  uint no_subparts= m_part_info->no_subparts, i= 0;
+  uint no_subparts= m_part_info->no_subparts;
+  uint i= 0;
+  LEX *lex= thd->lex;
+  int error;
+  DBUG_ENTER("ha_partition::handle_opt_partitions");
+  DBUG_PRINT("enter", ("all_parts %u, flag= %u", all_parts, flag));
+
+  do
+  {
+    partition_element *part_elem= part_it++;
+    if (all_parts || part_elem->part_state == PART_CHANGED)
+    {
+      handler *file;
+      if (m_is_sub_partitioned)
+      {
+        List_iterator<partition_element> sub_it(part_elem->subpartitions);
+        uint j= 0, part;
+        do
+        {
+          partition_element *sub_elem= sub_it++;
+          part= i * no_subparts + j;
+          DBUG_PRINT("info", ("Optimize subpartition %u",
+                     part));
+          if ((error= handle_opt_part(thd, check_opt, m_file[part], flag)))
+          {
+            my_error(ER_GET_ERRNO, MYF(0), error);
+            DBUG_RETURN(TRUE);
+          }
+        } while (++j < no_subparts);
+      }
+      else
+      {
+        DBUG_PRINT("info", ("Optimize partition %u", i));
+        if ((error= handle_opt_part(thd, check_opt, m_file[i], flag)))
+        {
+          my_error(ER_GET_ERRNO, MYF(0), error);
+          DBUG_RETURN(TRUE);
+        }
+      }
+    }
+  } while (++i < no_parts);
+  DBUG_RETURN(FALSE);
+}
+
+/*
+  Prepare by creating a new partition
+
+  SYNOPSIS
+    prepare_new_partition()
+    table                      Table object
+    create_info                Create info from CREATE TABLE
+    file                       Handler object of new partition
+    part_name                  partition name
+
+  RETURN VALUE
+    >0                         Error
+    0                          Success
+*/
+
+int ha_partition::prepare_new_partition(TABLE *table,
+                                        HA_CREATE_INFO *create_info,
+                                        handler *file, const char *part_name)
+{
+  int error;
+  bool create_flag= FALSE;
+  bool open_flag= FALSE;
+  DBUG_ENTER("prepare_new_partition");
+
+  if ((error= file->create(part_name, table, create_info)))
+    goto error;
+  create_flag= TRUE;
+  if ((error= file->ha_open(table, part_name, m_mode, m_open_test_lock)))
+    goto error;
+  if ((error= file->external_lock(current_thd, m_lock_type)))
+    goto error;
+
+  DBUG_RETURN(0);
+error:
+  if (create_flag)
+    VOID(file->delete_table(part_name));
+  print_error(error, MYF(0));
+  DBUG_RETURN(error);
+}
+
+
+/*
+  Cleanup by removing all created partitions after error
+
+  SYNOPSIS
+    cleanup_new_partition()
+    part_count             Number of partitions to remove
+
+  RETURN VALUE
+    NONE
+
+  DESCRIPTION
+  TODO:
+    We must ensure that in the case that we get an error during the process
+    that we call external_lock with F_UNLCK, close the table and delete the
+    table in the case where we have been successful with prepare_handler.
+    We solve this by keeping an array of successful calls to prepare_handler
+    which can then be used to undo the call.
+*/
+
+void ha_partition::cleanup_new_partition(uint part_count)
+{
+  handler **save_m_file= m_file;
+  DBUG_ENTER("ha_partition::cleanup_new_partition");
+
+  if (m_added_file && m_added_file[0])
+  {
+    m_file= m_added_file;
+    m_added_file= NULL;
+
+    external_lock(current_thd, F_UNLCK);
+    /* delete_table also needed, a bit more complex */
+    close();
+
+    m_added_file= m_file;
+    m_file= save_m_file;
+  }
+  DBUG_VOID_RETURN;
+}
+
+/*
+  Implement the partition changes defined by ALTER TABLE of partitions
+
+  SYNOPSIS
+    change_partitions()
+    create_info                 HA_CREATE_INFO object describing all
+                                fields and indexes in table
+    path                        Complete path of db and table name
+    out: copied                 Output parameter where number of copied
+                                records are added
+    out: deleted                Output parameter where number of deleted
+                                records are added
+    pack_frm_data               Reference to packed frm file
+    pack_frm_len                Length of packed frm file
+
+  RETURN VALUE
+    >0                        Failure
+    0                         Success
+
+  DESCRIPTION
+    Add and copy if needed a number of partitions, during this operation
+    no other operation is ongoing in the server. This is used by
+    ADD PARTITION all types as well as by REORGANIZE PARTITION. For
+    one-phased implementations it is used also by DROP and COALESCE
+    PARTITIONs.
+    One-phased implementation needs the new frm file, other handlers will
+    get zero length and a NULL reference here.
+*/
+
+int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
+                                    const char *path,
+                                    ulonglong *copied,
+                                    ulonglong *deleted,
+                                    const void *pack_frm_data
+                                    __attribute__((unused)),
+                                    uint pack_frm_len
+                                    __attribute__((unused)))
+{
+  List_iterator<partition_element> part_it(m_part_info->partitions);
+  List_iterator <partition_element> t_it(m_part_info->temp_partitions);
+  char part_name_buff[FN_REFLEN];
+  uint no_parts= m_part_info->partitions.elements;
+  uint no_subparts= m_part_info->no_subparts;
+  uint i= 0;
+  uint no_remain_partitions, part_count;
+  handler **new_file_array;
   int error= 1;
-  DBUG_ENTER("ha_partition::drop_partitions()");
+  bool first;
+  bool copy_parts= FALSE;
+  uint temp_partitions= m_part_info->temp_partitions.elements;
+  THD *thd= current_thd;
+  DBUG_ENTER("ha_partition::change_partitions");
+
+  m_reorged_parts= 0;
+  if (!is_sub_partitioned(m_part_info))
+    no_subparts= 1;
+
+  /*
+    Step 1:
+      Calculate number of reorganised partitions and allocate space for
+      their handler references.
+  */
+  if (temp_partitions)
+  {
+    m_reorged_parts= temp_partitions * no_subparts;
+  }
+  else
+  {
+    do
+    {
+      partition_element *part_elem= part_it++;
+      if (part_elem->part_state == PART_CHANGED ||
+          part_elem->part_state == PART_REORGED_DROPPED)
+      {
+        m_reorged_parts+= no_subparts;
+      }
+    } while (++i < no_parts);
+  }
+  if (m_reorged_parts &&
+      !(m_reorged_file= (handler**)sql_calloc(sizeof(partition_element*)*
+                                              (m_reorged_parts + 1))))
+  {
+    mem_alloc_error(sizeof(partition_element*)*(m_reorged_parts+1));
+    DBUG_RETURN(TRUE);
+  }
+
+  /*
+    Step 2:
+      Calculate number of partitions after change and allocate space for
+      their handler references.
+  */
+  no_remain_partitions= 0;
+  if (temp_partitions)
+  {
+    no_remain_partitions= no_parts * no_subparts;
+  }
+  else
+  {
+    part_it.rewind();
+    i= 0;
+    do
+    {
+      partition_element *part_elem= part_it++;
+      if (part_elem->part_state == PART_NORMAL ||
+          part_elem->part_state == PART_TO_BE_ADDED ||
+          part_elem->part_state == PART_CHANGED)
+      {
+        no_remain_partitions+= no_subparts;
+      }
+    } while (++i < no_parts);
+  }
+  if (!(new_file_array= (handler**)sql_calloc(sizeof(handler*)*
+                                              (2*(no_remain_partitions + 1)))))
+  {
+    mem_alloc_error(sizeof(handler*)*2*(no_remain_partitions+1));
+    DBUG_RETURN(TRUE);
+  }
+  m_added_file= &new_file_array[no_remain_partitions + 1];
+
+  /*
+    Step 3:
+      Fill m_reorged_file with handler references and NULL at the end
+  */
+  if (m_reorged_parts)
+  {
+    i= 0;
+    part_count= 0;
+    first= TRUE;
+    part_it.rewind();
+    do
+    {
+      partition_element *part_elem= part_it++;
+      if (part_elem->part_state == PART_CHANGED ||
+          part_elem->part_state == PART_REORGED_DROPPED)
+      {
+        memcpy((void*)&m_reorged_file[part_count],
+               (void*)&m_file[i*no_subparts],
+               sizeof(handler*)*no_subparts);
+        part_count+= no_subparts;
+      }
+      else if (first && temp_partitions &&
+               part_elem->part_state == PART_TO_BE_ADDED)
+      {
+        /*
+          When doing an ALTER TABLE REORGANIZE PARTITION a number of
+          partitions is to be reorganised into a set of new partitions.
+          The reorganised partitions are in this case in the temp_partitions
+          list. We copy all of them in one batch and thus we only do this
+          until we find the first partition with state PART_TO_BE_ADDED
+          since this is where the new partitions go in and where the old
+          ones used to be.
+        */
+        first= FALSE;
+        memcpy((void*)m_reorged_file, &m_file[i*no_subparts],
+               sizeof(handler*)*m_reorged_parts*no_subparts);
+      }
+    } while (++i < no_parts);
+  }
 
+  /*
+    Step 4:
+      Fill new_array_file with handler references. Create the handlers if
+      needed.
+  */
+  i= 0;
+  part_count= 0;
+  part_it.rewind();
   do
   {
     partition_element *part_elem= part_it++;
-    if (part_elem->part_state == PART_IS_DROPPED)
+    if (part_elem->part_state == PART_NORMAL)
+    {
+      memcpy((void*)&new_file_array[part_count], (void*)&m_file[i],
+             sizeof(handler*)*no_subparts);
+      part_count+= no_subparts;
+    }
+    else if (part_elem->part_state == PART_CHANGED ||
+             part_elem->part_state == PART_TO_BE_ADDED)
+    {
+      uint j= 0;
+      do
+      {
+        if (!(new_file_array[part_count++]= get_new_handler(table->s,
+                                            thd->mem_root,
+                                            part_elem->engine_type)))
+        {
+          mem_alloc_error(sizeof(handler));
+          DBUG_RETURN(TRUE);
+        }
+      } while (++j < no_subparts);
+    }
+  } while (++i < no_parts);
+
+  /*
+    Step 5:
+      Create the new partitions and also open, lock and call external_lock
+      on them to prepare them for copy phase and also for later close
+      calls
+  */
+  i= 0;
+  part_count= 0;
+  part_it.rewind();
+  do
+  {
+    partition_element *part_elem= part_it++;
+    if (part_elem->part_state == PART_TO_BE_ADDED ||
+        part_elem->part_state == PART_CHANGED)
     {
       /*
-        This part is to be dropped, meaning the part or all its subparts.
+        A new partition needs to be created PART_TO_BE_ADDED means an
+        entirely new partition and PART_CHANGED means a changed partition
+        that will still exist with either more or less data in it.
       */
+      uint name_variant= NORMAL_PART_NAME;
+      if (part_elem->part_state == PART_CHANGED ||
+          (part_elem->part_state == PART_TO_BE_ADDED && temp_partitions))
+        name_variant= TEMP_PART_NAME;
       if (is_sub_partitioned(m_part_info))
       {
         List_iterator<partition_element> sub_it(part_elem->subpartitions);
@@ -435,44 +1462,204 @@
           partition_element *sub_elem= sub_it++;
           create_subpartition_name(part_name_buff, path,
                                    part_elem->partition_name,
-                                   sub_elem->partition_name);
+                                   sub_elem->partition_name,
+                                   name_variant);
           part= i * no_subparts + j;
-          DBUG_PRINT("info", ("Drop subpartition %s", part_name_buff));
-          error= m_file[part]->delete_table((const char *) part_name_buff);
+          DBUG_PRINT("info", ("Add subpartition %s", part_name_buff));
+          if ((error= prepare_new_partition(table, create_info,
+                                            new_file_array[part],
+                                            (const char *)part_name_buff)))
+          {
+            cleanup_new_partition(part_count);
+            DBUG_RETURN(TRUE);
+          }
+          m_added_file[part_count++]= new_file_array[part];
         } while (++j < no_subparts);
       }
       else
       {
         create_partition_name(part_name_buff, path,
-                              part_elem->partition_name);
-        DBUG_PRINT("info", ("Drop partition %s", part_name_buff));
-        error= m_file[i]->delete_table((const char *) part_name_buff);
+                              part_elem->partition_name, name_variant,
+                              TRUE);
+        DBUG_PRINT("info", ("Add partition %s", part_name_buff));
+        if ((error= prepare_new_partition(table, create_info,
+                                          new_file_array[i],
+                                          (const char *)part_name_buff)))
+        {
+          cleanup_new_partition(part_count);
+          DBUG_RETURN(TRUE);
+        }
+        m_added_file[part_count++]= new_file_array[i];
       }
     }
   } while (++i < no_parts);
-  DBUG_RETURN(error);
+
+  /*
+    Step 6:
+      State update to prepare for next write of the frm file.
+  */
+  i= 0;
+  part_it.rewind();
+  do
+  {
+    partition_element *part_elem= part_it++;
+    if (part_elem->part_state == PART_TO_BE_ADDED)
+      part_elem->part_state= PART_IS_ADDED;
+    else if (part_elem->part_state == PART_CHANGED)
+      part_elem->part_state= PART_IS_CHANGED;
+    else if (part_elem->part_state == PART_REORGED_DROPPED)
+      part_elem->part_state= PART_TO_BE_DROPPED;
+  } while (++i < no_parts);
+  for (i= 0; i < temp_partitions; i++)
+  {
+    partition_element *part_elem= t_it++;
+    DBUG_ASSERT(part_elem->part_state == PART_TO_BE_REORGED);
+    part_elem->part_state= PART_TO_BE_DROPPED;
+  }
+  m_new_file= new_file_array;
+  DBUG_RETURN(copy_partitions(copied, deleted));
 }
 
+
+/*
+  Copy partitions as part of ALTER TABLE of partitions
+
+  SYNOPSIS
+    copy_partitions()
+    out:copied                 Number of records copied
+    out:deleted                Number of records deleted
+
+  RETURN VALUE
+    >0                         Error code
+    0                          Success
+
+  DESCRIPTION
+    change_partitions has done all the preparations, now it is time to
+    actually copy the data from the reorganised partitions to the new
+    partitions.
+*/
+
+int ha_partition::copy_partitions(ulonglong *copied, ulonglong *deleted)
+{
+  uint reorg_part= 0;
+  int result= 0;
+  longlong func_value;
+  DBUG_ENTER("ha_partition::copy_partitions");
+
+  while (reorg_part < m_reorged_parts)
+  {
+    handler *file= m_reorged_file[reorg_part];
+    uint32 new_part;
+
+    late_extra_cache(reorg_part);
+    if ((result= file->ha_rnd_init(1)))
+      goto error;
+    while (TRUE)
+    {
+      if ((result= file->rnd_next(m_rec0)))
+      {
+        if (result == HA_ERR_RECORD_DELETED)
+          continue;                              //Probably MyISAM
+        if (result != HA_ERR_END_OF_FILE)
+          goto error;
+        /*
+          End-of-file reached, break out to continue with next partition or
+          end the copy process.
+        */
+        break;
+      }
+      /* Found record to insert into new handler */
+      if (m_part_info->get_partition_id(m_part_info, &new_part,
+                                        &func_value))
+      {
+        /*
+           This record is in the original table but will not be in the new
+           table since it doesn't fit into any partition any longer due to
+           changed partitioning ranges or list values.
+        */
+        deleted++;
+      }
+      else
+      {
+        /* Copy record to new handler */
+        copied++;
+        if ((result= m_new_file[new_part]->write_row(m_rec0)))
+          goto error;
+      }
+    }
+    late_extra_no_cache(reorg_part);
+    file->rnd_end();
+    reorg_part++;
+  }
+  DBUG_RETURN(FALSE);
+error:
+  print_error(result, MYF(0));
+  DBUG_RETURN(TRUE);
+}
+
+
+/*
+  Update create info as part of ALTER TABLE
+
+  SYNOPSIS
+    update_create_info()
+    create_info                   Create info from ALTER TABLE
+
+  RETURN VALUE
+    NONE
+
+  DESCRIPTION
+    Method empty so far
+*/
+
 void ha_partition::update_create_info(HA_CREATE_INFO *create_info)
 {
   return;
 }
 
 
+/*
+  Change comments specific to handler
+
+  SYNOPSIS
+    update_table_comment()
+    comment                       Original comment
+
+  RETURN VALUE
+    new comment 
+
+  DESCRIPTION
+    No comment changes so far
+*/
+
 char *ha_partition::update_table_comment(const char *comment)
 {
-  return (char*) comment;                       // Nothing to change
+  return (char*) comment;                       /* Nothing to change */
 }
 
 
 
 /*
-  Common routine to handle delete_table and rename_table.
-  The routine uses the partition handler file to get the
-  names of the partition instances. Both these routines
-  are called after creating the handler without table
-  object and thus the file is needed to discover the
-  names of the partitions and the underlying storage engines.
+  Handle delete, rename and create table
+
+  SYNOPSIS
+    del_ren_cre_table()
+    from                    Full path of old table
+    to                      Full path of new table
+    table_arg               Table object
+    create_info             Create info
+
+  RETURN VALUE
+    >0                      Error
+    0                       Success
+
+  DESCRIPTION
+    Common routine to handle delete_table and rename_table.
+    The routine uses the partition handler file to get the
+    names of the partition instances. Both these routines
+    are called after creating the handler without table
+    object and thus the file is needed to discover the
+    names of the partitions and the underlying storage engines.
 */
 
 uint ha_partition::del_ren_cre_table(const char *from,
@@ -480,7 +1667,8 @@
 				     TABLE *table_arg,
 				     HA_CREATE_INFO *create_info)
 {
-  int save_error= 0, error;
+  int save_error= 0;
+  int error;
   char from_buff[FN_REFLEN], to_buff[FN_REFLEN];
   char *name_buffer_ptr;
   uint i;
@@ -495,10 +1683,12 @@
   i= 0;
   do
   {
-    create_partition_name(from_buff, from, name_buffer_ptr);
+    create_partition_name(from_buff, from, name_buffer_ptr, NORMAL_PART_NAME,
+                          FALSE);
     if (to != NULL)
     {						// Rename branch
-      create_partition_name(to_buff, to, name_buffer_ptr);
+      create_partition_name(to_buff, to, name_buffer_ptr, NORMAL_PART_NAME,
+                            FALSE);
       error= (*file)->rename_table((const char*) from_buff,
 				   (const char*) to_buff);
     }
@@ -517,12 +1707,23 @@
   DBUG_RETURN(save_error);
 }
 
+/*
+  Find partition based on partition id
+
+  SYNOPSIS
+    find_partition_element()
+    part_id                   Partition id of partition looked for
+
+  RETURN VALUE
+    >0                        Reference to partition_element
+    0                         Partition not found
+*/
 
 partition_element *ha_partition::find_partition_element(uint part_id)
 {
   uint i;
   uint curr_part_id= 0;
-  List_iterator_fast < partition_element > part_it(m_part_info->partitions);
+  List_iterator_fast <partition_element> part_it(m_part_info->partitions);
 
   for (i= 0; i < m_part_info->no_parts; i++)
   {
@@ -548,18 +1749,32 @@
 }
 
 
+/*
+   Set up table share object before calling create on underlying handler
+
+   SYNOPSIS
+     set_up_table_before_create()
+     table                       Table object
+     info                        Create info
+     part_id                     Partition id of partition to set-up
+
+   RETURN VALUE
+     NONE
+
+   DESCRIPTION
+     Set up
+     1) Comment on partition
+     2) MAX_ROWS, MIN_ROWS on partition
+     3) Index file name on partition
+     4) Data file name on partition
+*/
+
 void ha_partition::set_up_table_before_create(TABLE *table,
 					      HA_CREATE_INFO *info,
 					      uint part_id)
 {
-  /*
-    Set up
-    1) Comment on partition
-    2) MAX_ROWS, MIN_ROWS on partition
-    3) Index file name on partition
-    4) Data file name on partition
-  */
   partition_element *part_elem= find_partition_element(part_id);
+
   if (!part_elem)
     return;                                     // Fatal error
   table->s->max_rows= part_elem->part_max_rows;
@@ -570,53 +1785,95 @@
 
 
 /*
-  Routine used to add two names with '_' in between then. Service routine
-  to create_handler_file
-  Include the NULL in the count of characters since it is needed as separator
-  between the partition names.
+  Add two names together
+
+  SYNOPSIS
+    name_add()
+    out:dest                          Destination string
+    first_name                        First name
+    sec_name                          Second name
+
+  RETURN VALUE
+    >0                                Error
+    0                                 Success
+
+  DESCRIPTION
+    Routine used to add two names with '_' in between then. Service routine
+    to create_handler_file
+    Include the NULL in the count of characters since it is needed as separator
+    between the partition names.
 */
 
-//static uint name_add(char *dest, const char *first_name, const char *sec_name)
-//{
-//  return (uint) (strxmov(dest, first_name, "_", sec_name, NullS) -dest) + 1;
-//}
+static uint name_add(char *dest, const char *first_name, const char *sec_name)
+{
+  return (uint) (strxmov(dest, first_name, "#SP#", sec_name, NullS) -dest) + 1;
+}
 
 
 /*
-  Method used to create handler file with names of partitions, their
-  engine types and the number of partitions.
+  Create the special .par file
+
+  SYNOPSIS
+    create_handler_file()
+    name                      Full path of table name
+
+  RETURN VALUE
+    >0                        Error code
+    0                         Success
+
+  DESCRIPTION
+    Method used to create handler file with names of partitions, their
+    engine types and the number of partitions.
 */
 
 bool ha_partition::create_handler_file(const char *name)
 {
   partition_element *part_elem, *subpart_elem;
   uint i, j, part_name_len, subpart_name_len;
-  uint tot_partition_words, tot_name_len;
+  uint tot_partition_words, tot_name_len, no_parts;
+  uint tot_parts= 0;
   uint tot_len_words, tot_len_byte, chksum, tot_name_words;
   char *name_buffer_ptr;
   uchar *file_buffer, *engine_array;
   bool result= TRUE;
   char file_name[FN_REFLEN];
+  char part_name[FN_REFLEN];
+  char subpart_name[FN_REFLEN];
   File file;
-  List_iterator_fast < partition_element > part_it(m_part_info->partitions);
+  List_iterator_fast <partition_element> part_it(m_part_info->partitions);
   DBUG_ENTER("create_handler_file");
 
-  DBUG_PRINT("info", ("table name = %s", name));
+  no_parts= m_part_info->partitions.elements;
+  DBUG_PRINT("info", ("table name = %s, no_parts = %u", name,
+                      no_parts));
   tot_name_len= 0;
-  for (i= 0; i < m_part_info->no_parts; i++)
+  for (i= 0; i < no_parts; i++)
   {
     part_elem= part_it++;
-    part_name_len= strlen(part_elem->partition_name);
+    if (part_elem->part_state != PART_NORMAL &&
+        part_elem->part_state != PART_IS_ADDED &&
+        part_elem->part_state != PART_IS_CHANGED)
+      continue;
+    tablename_to_filename(part_elem->partition_name, part_name,
+                          FN_REFLEN);
+    part_name_len= strlen(part_name);
     if (!m_is_sub_partitioned)
+    {
       tot_name_len+= part_name_len + 1;
+      tot_parts++;
+    }
     else
     {
-      List_iterator_fast<partition_element> sub_it(part_elem->subpartitions);
+      List_iterator_fast <partition_element> sub_it(part_elem->subpartitions);
       for (j= 0; j < m_part_info->no_subparts; j++)
       {
 	subpart_elem= sub_it++;
-	subpart_name_len= strlen(subpart_elem->partition_name);
-	tot_name_len+= part_name_len + subpart_name_len + 2;
+        tablename_to_filename(subpart_elem->partition_name,
+                              subpart_name,
+                              FN_REFLEN);
+	subpart_name_len= strlen(subpart_name);
+	tot_name_len+= part_name_len + subpart_name_len + 5;
+        tot_parts++;
       }
     }
   }
@@ -633,7 +1890,7 @@
 
      All padding bytes are zeroed
   */
-  tot_partition_words= (m_tot_parts + 3) / 4;
+  tot_partition_words= (tot_parts + 3) / 4;
   tot_name_words= (tot_name_len + 3) / 4;
   tot_len_words= 4 + tot_partition_words + tot_name_words;
   tot_len_byte= 4 * tot_len_words;
@@ -642,24 +1899,34 @@
   engine_array= (file_buffer + 12);
   name_buffer_ptr= (char*) (file_buffer + ((4 + tot_partition_words) * 4));
   part_it.rewind();
-  for (i= 0; i < m_part_info->no_parts; i++)
+  for (i= 0; i < no_parts; i++)
   {
     part_elem= part_it++;
+    if (part_elem->part_state != PART_NORMAL &&
+        part_elem->part_state != PART_IS_ADDED &&
+        part_elem->part_state != PART_IS_CHANGED)
+      continue;
     if (!m_is_sub_partitioned)
     {
-      name_buffer_ptr= strmov(name_buffer_ptr, part_elem->partition_name)+1;
+      tablename_to_filename(part_elem->partition_name, part_name, FN_REFLEN);
+      name_buffer_ptr= strmov(name_buffer_ptr, part_name)+1;
       *engine_array= (uchar) ha_legacy_type(part_elem->engine_type);
       DBUG_PRINT("info", ("engine: %u", *engine_array));
       engine_array++;
     }
     else
     {
-      List_iterator_fast<partition_element> sub_it(part_elem->subpartitions);
+      List_iterator_fast <partition_element> sub_it(part_elem->subpartitions);
       for (j= 0; j < m_part_info->no_subparts; j++)
       {
 	subpart_elem= sub_it++;
-        name_buffer_ptr= strmov(name_buffer_ptr, 
-                                subpart_elem->partition_name)+1;
+        tablename_to_filename(part_elem->partition_name, part_name,
+                              FN_REFLEN);
+        tablename_to_filename(subpart_elem->partition_name, subpart_name,
+                              FN_REFLEN);
+	name_buffer_ptr+= name_add(name_buffer_ptr,
+				   part_name,
+				   subpart_name);
 	*engine_array= (uchar) ha_legacy_type(part_elem->engine_type);
 	engine_array++;
       }
@@ -667,7 +1934,7 @@
   }
   chksum= 0;
   int4store(file_buffer, tot_len_words);
-  int4store(file_buffer + 8, m_tot_parts);
+  int4store(file_buffer + 8, tot_parts);
   int4store(file_buffer + 12 + (tot_partition_words * 4), tot_name_len);
   for (i= 0; i < tot_len_words; i++)
     chksum^= uint4korr(file_buffer + 4 * i);
@@ -691,6 +1958,15 @@
   DBUG_RETURN(result);
 }
 
+/*
+  Clear handler variables and free some memory
+
+  SYNOPSIS
+    clear_handler_file()
+
+  RETURN VALUE 
+    NONE
+*/
 
 void ha_partition::clear_handler_file()
 {
@@ -701,6 +1977,16 @@
   m_engine_array= NULL;
 }
 
+/*
+  Create underlying handler objects
+
+  SYNOPSIS
+    create_handlers()
+
+  RETURN VALUE
+    TRUE                  Error
+    FALSE                 Success
+*/
 
 bool ha_partition::create_handlers()
 {
@@ -734,10 +2020,20 @@
   DBUG_RETURN(FALSE);
 }
 
+/*
+  Create underlying handler objects from partition info
+
+  SYNOPSIS
+    new_handlers_from_part_info()
+
+  RETURN VALUE
+    TRUE                  Error
+    FALSE                 Success
+*/
 
 bool ha_partition::new_handlers_from_part_info()
 {
-  uint i, j;
+  uint i, j, part_count;
   partition_element *part_elem;
   uint alloc_len= (m_tot_parts + 1) * sizeof(handler*);
   List_iterator_fast <partition_element> part_it(m_part_info->partitions);
@@ -745,23 +2041,22 @@
   DBUG_ENTER("ha_partition::new_handlers_from_part_info");
 
   if (!(m_file= (handler **) sql_alloc(alloc_len)))
-    goto error;
+  {
+    mem_alloc_error(alloc_len);
+    goto error_end;
+  }
   bzero(m_file, alloc_len);
   DBUG_ASSERT(m_part_info->no_parts > 0);
 
   i= 0;
+  part_count= 0;
   /*
     Don't know the size of the underlying storage engine, invent a number of
     bytes allocated for error message if allocation fails
   */
-  alloc_len= 128; 
   do
   {
     part_elem= part_it++;
-    if (!(m_file[i]= get_new_handler(table_share, thd->mem_root,
-                                     part_elem->engine_type)))
-      goto error;
-    DBUG_PRINT("info", ("engine_type: %u", (uint) ha_legacy_type(part_elem->engine_type)));
     if (m_is_sub_partitioned)
     {
       for (j= 0; j < m_part_info->no_subparts; j++)
@@ -769,9 +2064,18 @@
 	if (!(m_file[i]= get_new_handler(table_share, thd->mem_root,
                                          part_elem->engine_type)))
           goto error;
-	DBUG_PRINT("info", ("engine_type: %u", (uint) ha_legacy_type(part_elem->engine_type)));
+	DBUG_PRINT("info", ("engine_type: %u",
+                   (uint) ha_legacy_type(part_elem->engine_type)));
       }
     }
+    else
+    {
+      if (!(m_file[part_count++]= get_new_handler(table_share, thd->mem_root,
+                                                  part_elem->engine_type)))
+        goto error;
+      DBUG_PRINT("info", ("engine_type: %u",
+                 (uint) ha_legacy_type(part_elem->engine_type)));
+    }
   } while (++i < m_part_info->no_parts);
   if (part_elem->engine_type == &myisam_hton)
   {
@@ -780,14 +2084,26 @@
   }
   DBUG_RETURN(FALSE);
 error:
-  my_error(ER_OUTOFMEMORY, MYF(0), alloc_len);
+  mem_alloc_error(sizeof(handler));
+error_end:
   DBUG_RETURN(TRUE);
 }
 
 
 /*
-  Open handler file to get partition names, engine types and number of
-  partitions.
+  Get info about partition engines and their names from the .par file
+
+  SYNOPSIS
+    get_from_handler_file()
+    name                        Full path of table name
+
+  RETURN VALUE
+    TRUE                        Error
+    FALSE                       Success
+
+  DESCRIPTION
+    Open handler file to get partition names, engine types and number of
+    partitions.
 */
 
 bool ha_partition::get_from_handler_file(const char *name)
@@ -823,6 +2139,7 @@
   if (chksum)
     goto err2;
   m_tot_parts= uint4korr((file_buffer) + 8);
+  DBUG_PRINT("info", ("No of parts = %u", m_tot_parts));
   tot_partition_words= (m_tot_parts + 3) / 4;
   if (!(engine_array= (handlerton **) my_malloc(m_tot_parts * sizeof(handlerton*),MYF(0))))
     goto err2;
@@ -852,17 +2169,31 @@
   DBUG_RETURN(TRUE);
 }
 
+
 /****************************************************************************
                 MODULE open/close object
 ****************************************************************************/
 /*
-  Used for opening tables. The name will be the name of the file.
-  A table is opened when it needs to be opened. For instance
-  when a request comes in for a select on the table (tables are not
-  open and closed for each request, they are cached).
+  Open handler object
+
+  SYNOPSIS
+    open()
+    name                  Full path of table name
+    mode                  Open mode flags
+    test_if_locked        ?
+
+  RETURN VALUE
+    >0                    Error
+    0                     Success
+
+  DESCRIPTION
+    Used for opening tables. The name will be the name of the file.
+    A table is opened when it needs to be opened. For instance
+    when a request comes in for a select on the table (tables are not
+    open and closed for each request, they are cached).
 
-  Called from handler.cc by handler::ha_open(). The server opens all tables
-  by calling ha_open() which then calls the handler specific open().
+    Called from handler.cc by handler::ha_open(). The server opens all tables
+    by calling ha_open() which then calls the handler specific open().
 */
 
 int ha_partition::open(const char *name, int mode, uint test_if_locked)
@@ -875,6 +2206,8 @@
   DBUG_ENTER("ha_partition::open");
 
   ref_length= 0;
+  m_mode= mode;
+  m_open_test_lock= test_if_locked;
   m_part_field_array= m_part_info->full_part_field_array;
   if (get_from_handler_file(name))
     DBUG_RETURN(1);
@@ -910,7 +2243,8 @@
   file= m_file;
   do
   {
-    create_partition_name(name_buff, name, name_buffer_ptr);
+    create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME,
+                          FALSE);
     if ((error= (*file)->ha_open(table, (const char*) name_buff, mode,
                                  test_if_locked)))
       goto err_handler;
@@ -932,7 +2266,7 @@
   /*
     Initialise priority queue, initialised to reading forward.
   */
-  if ((error= init_queue(&queue, m_tot_parts, (uint) PARTITION_BYTES_IN_POS,
+  if ((error= init_queue(&m_queue, m_tot_parts, (uint) PARTITION_BYTES_IN_POS,
                          0, key_rec_cmp, (void*)this)))
     goto err_handler;
   /*
@@ -950,28 +2284,45 @@
   DBUG_RETURN(error);
 }
 
+
 /*
-  Closes a table. We call the free_share() function to free any resources
-  that we have allocated in the "shared" structure.
+  Close handler object
+
+  SYNOPSIS
+    close()
 
-  Called from sql_base.cc, sql_select.cc, and table.cc.
-  In sql_select.cc it is only used to close up temporary tables or during
-  the process where a temporary table is converted over to being a
-  myisam table.
-  For sql_base.cc look at close_data_tables().
+  RETURN VALUE
+    >0                   Error code
+    0                    Success
+
+  DESCRIPTION
+    Called from sql_base.cc, sql_select.cc, and table.cc.
+    In sql_select.cc it is only used to close up temporary tables or during
+    the process where a temporary table is converted over to being a
+    myisam table.
+    For sql_base.cc look at close_data_tables().
 */
 
 int ha_partition::close(void)
 {
   handler **file;
+  bool first= TRUE;
   DBUG_ENTER("ha_partition::close");
 
-  delete_queue(&queue);
+  delete_queue(&m_queue);
   file= m_file;
+
+repeat:
   do
   {
     (*file)->close();
   } while (*(++file));
+  if (first && m_added_file && m_added_file[0])
+  {
+    file= m_added_file;
+    first= FALSE;
+    goto repeat;
+  }
   DBUG_RETURN(0);
 }
 
@@ -986,30 +2337,47 @@
 */
 
 /*
-  First you should go read the section "locking functions for mysql" in
-  lock.cc to understand this.
-  This create a lock on the table. If you are implementing a storage engine
-  that can handle transactions look at ha_berkely.cc to see how you will
-  want to goo about doing this. Otherwise you should consider calling
-  flock() here.
-  Originally this method was used to set locks on file level to enable
-  several MySQL Servers to work on the same data. For transactional
-  engines it has been "abused" to also mean start and end of statements
-  to enable proper rollback of statements and transactions. When LOCK
-  TABLES has been issued the start_stmt method takes over the role of
-  indicating start of statement but in this case there is no end of
-  statement indicator(?).
+  Set external locks on table
 
-  Called from lock.cc by lock_external() and unlock_external(). Also called
-  from sql_table.cc by copy_data_between_tables().
+  SYNOPSIS
+    external_lock()
+    thd                    Thread object
+    lock_type              Type of external lock
+
+  RETURN VALUE
+    >0                   Error code
+    0                    Success
+
+  DESCRIPTION
+    First you should go read the section "locking functions for mysql" in
+    lock.cc to understand this.
+    This creates a lock on the table. If you are implementing a storage engine
+    that can handle transactions look at ha_berkeley.cc to see how you will
+    want to go about doing this. Otherwise you should consider calling
+    flock() here.
+    Originally this method was used to set locks on file level to enable
+    several MySQL Servers to work on the same data. For transactional
+    engines it has been "abused" to also mean start and end of statements
+    to enable proper rollback of statements and transactions. When LOCK
+    TABLES has been issued the start_stmt method takes over the role of
+    indicating start of statement but in this case there is no end of
+    statement indicator(?).
+
+    Called from lock.cc by lock_external() and unlock_external(). Also called
+    from sql_table.cc by copy_data_between_tables().
 */
 
 int ha_partition::external_lock(THD *thd, int lock_type)
 {
   uint error;
   handler **file;
+  bool first= TRUE;
   DBUG_ENTER("ha_partition::external_lock");
+
   file= m_file;
+  m_lock_type= lock_type;
+
+repeat:
   do
   {
     if ((error= (*file)->external_lock(thd, lock_type)))
@@ -1018,7 +2386,13 @@
 	goto err_handler;
     }
   } while (*(++file));
-  m_lock_type= lock_type;                       // For the future (2009?)
+  if (first && m_added_file && m_added_file[0])
+  {
+    DBUG_ASSERT(lock_type == F_UNLCK);
+    file= m_added_file;
+    first= FALSE;
+    goto repeat;
+  }
   DBUG_RETURN(0);
 
 err_handler:
@@ -1029,36 +2403,49 @@
 
 
 /*
-  The idea with handler::store_lock() is the following:
+  Get the lock(s) for the table and perform conversion of locks if needed
 
-  The statement decided which locks we should need for the table
-  for updates/deletes/inserts we get WRITE locks, for SELECT... we get
-  read locks.
-
-  Before adding the lock into the table lock handler (see thr_lock.c)
-  mysqld calls store lock with the requested locks.  Store lock can now
-  modify a write lock to a read lock (or some other lock), ignore the
-  lock (if we don't want to use MySQL table locks at all) or add locks
-  for many tables (like we do when we are using a MERGE handler).
-
-  Berkeley DB for partition  changes all WRITE locks to TL_WRITE_ALLOW_WRITE
-  (which signals that we are doing WRITES, but we are still allowing other
-  reader's and writer's.
-
-  When releasing locks, store_lock() are also called. In this case one
-  usually doesn't have to do anything.
-
-  store_lock is called when holding a global mutex to ensure that only
-  one thread at a time changes the locking information of tables.
-
-  In some exceptional cases MySQL may send a request for a TL_IGNORE;
-  This means that we are requesting the same lock as last time and this
-  should also be ignored. (This may happen when someone does a flush
-  table when we have opened a part of the tables, in which case mysqld
-  closes and reopens the tables and tries to get the same locks at last
-  time).  In the future we will probably try to remove this.
+  SYNOPSIS
+    store_lock()
+    thd                   Thread object
+    to                    Lock object array
+    lock_type             Table lock type
+
+  RETURN VALUE
+    >0                   Error code
+    0                    Success
+
+  DESCRIPTION
+    The idea with handler::store_lock() is the following:
+
+    The statement decided which locks we should need for the table
+    for updates/deletes/inserts we get WRITE locks, for SELECT... we get
+    read locks.
+
+    Before adding the lock into the table lock handler (see thr_lock.c)
+    mysqld calls store lock with the requested locks.  Store lock can now
+    modify a write lock to a read lock (or some other lock), ignore the
+    lock (if we don't want to use MySQL table locks at all) or add locks
+    for many tables (like we do when we are using a MERGE handler).
+
+    Berkeley DB for partition  changes all WRITE locks to TL_WRITE_ALLOW_WRITE
+    (which signals that we are doing WRITES, but we are still allowing other
+    readers and writers).
+
+    When releasing locks, store_lock() is also called. In this case one
+    usually doesn't have to do anything.
+
+    store_lock is called when holding a global mutex to ensure that only
+    one thread at a time changes the locking information of tables.
+
+    In some exceptional cases MySQL may send a request for a TL_IGNORE;
+    This means that we are requesting the same lock as last time and this
+    should also be ignored. (This may happen when someone does a flush
+    table when we have opened a part of the tables, in which case mysqld
+    closes and reopens the tables and tries to get the same locks as last
+    time).  In the future we will probably try to remove this.
 
-  Called from lock.cc by get_lock_data().
+    Called from lock.cc by get_lock_data().
 */
 
 THR_LOCK_DATA **ha_partition::store_lock(THD *thd,
@@ -1067,6 +2454,7 @@
 {
   handler **file;
   DBUG_ENTER("ha_partition::store_lock");
+
   file= m_file;
   do
   {
@@ -1075,12 +2463,29 @@
   DBUG_RETURN(to);
 }
 
+/*
+  Start a statement when table is locked
+
+  SYNOPSIS
+    start_stmt()
+    thd                  Thread object
+    lock_type            Type of external lock
+
+  RETURN VALUE
+    >0                   Error code
+    0                    Success
+
+  DESCRIPTION
+    This method is called instead of external lock when the table is locked
+    before the statement is executed.
+*/
 
 int ha_partition::start_stmt(THD *thd, thr_lock_type lock_type)
 {
   int error= 0;
   handler **file;
   DBUG_ENTER("ha_partition::start_stmt");
+
   file= m_file;
   do
   {
@@ -1092,22 +2497,41 @@
 
 
 /*
-  Returns the number of store locks needed in call to store lock.
-  We return number of partitions since we call store_lock on each
-  underlying handler. Assists the above functions in allocating
-  sufficient space for lock structures.
+  Get number of lock objects returned in store_lock
+
+  SYNOPSIS
+    lock_count()
+
+  RETURN VALUE
+    Number of locks returned in call to store_lock
+
+  DESCRIPTION
+    Returns the number of store locks needed in call to store lock.
+    We return number of partitions since we call store_lock on each
+    underlying handler. Assists the above functions in allocating
+    sufficient space for lock structures.
 */
 
 uint ha_partition::lock_count() const
 {
   DBUG_ENTER("ha_partition::lock_count");
+
   DBUG_RETURN(m_no_locks);
 }
 
 
 /*
-  Record currently processed was not in the result set of the statement
-  and is thus unlocked. Used for UPDATE and DELETE queries.
+  Unlock last accessed row
+
+  SYNOPSIS
+    unlock_row()
+
+  RETURN VALUE
+    NONE
+
+  DESCRIPTION
+    Record currently processed was not in the result set of the statement
+    and is thus unlocked. Used for UPDATE and DELETE queries.
 */
 
 void ha_partition::unlock_row()
@@ -1122,43 +2546,56 @@
 ****************************************************************************/
 
 /*
-  write_row() inserts a row. buf() is a byte array of data, normally record[0].
-
-  You can use the field information to extract the data from the native byte
-  array type.
-
-  Example of this would be:
-  for (Field **field=table->field ; *field ; field++)
-  {
-    ...
-  }
+  Insert a row to the table
 
-  See ha_tina.cc for an partition of extracting all of the data as strings.
-  ha_berekly.cc has an partition of how to store it intact by "packing" it
-  for ha_berkeley's own native storage type.
-
-  See the note for update_row() on auto_increments and timestamps. This
-  case also applied to write_row().
+  SYNOPSIS
+    write_row()
+    buf                        The row in MySQL Row Format
 
-  Called from item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc,
-  sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc, and sql_update.cc.
+  RETURN VALUE
+    >0                         Error code
+    0                          Success
+
+  DESCRIPTION
+    write_row() inserts a row. buf() is a byte array of data, normally
+    record[0].
 
-  ADDITIONAL INFO:
+    You can use the field information to extract the data from the native byte
+    array type.
 
-  Most handlers set timestamp when calling write row if any such fields
-  exists. Since we are calling an underlying handler we assume the
-  underlying handler will assume this responsibility.
+    Example of this would be:
+    for (Field **field=table->field ; *field ; field++)
+    {
+      ...
+    }
 
-  Underlying handlers will also call update_auto_increment to calculate
-  the new auto increment value. We will catch the call to
-  get_auto_increment and ensure this increment value is maintained by
-  only one of the underlying handlers.
+    See ha_tina.cc for a variant of extracting all of the data as strings.
+    ha_berkeley.cc has a variant of how to store it intact by "packing" it
+    for ha_berkeley's own native storage type.
+
+    See the note for update_row() on auto_increments and timestamps. This
+    case also applied to write_row().
+
+    Called from item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc,
+    sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc, and sql_update.cc.
+
+    ADDITIONAL INFO:
+
+    Most handlers set timestamp when calling write row if any such fields
+    exists. Since we are calling an underlying handler we assume the
+    underlying handler will assume this responsibility.
+
+    Underlying handlers will also call update_auto_increment to calculate
+    the new auto increment value. We will catch the call to
+    get_auto_increment and ensure this increment value is maintained by
+    only one of the underlying handlers.
 */
 
 int ha_partition::write_row(byte * buf)
 {
   uint32 part_id;
   int error;
+  longlong func_value;
 #ifdef NOT_NEEDED
   byte *rec0= m_rec0;
 #endif
@@ -1168,17 +2605,19 @@
 #ifdef NOT_NEEDED
   if (likely(buf == rec0))
 #endif
-    error= m_part_info->get_partition_id(m_part_info, &part_id);
+    error= m_part_info->get_partition_id(m_part_info, &part_id,
+                                         &func_value);
 #ifdef NOT_NEEDED
   else
   {
     set_field_ptr(m_part_field_array, buf, rec0);
-    error= m_part_info->get_partition_id(m_part_info, &part_id);
+    error= m_part_info->get_partition_id(m_part_info, &part_id,
+                                         &func_value);
     set_field_ptr(m_part_field_array, rec0, buf);
   }
 #endif
   if (unlikely(error))
-    DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);
+    DBUG_RETURN(error);
   m_last_part= part_id;
   DBUG_PRINT("info", ("Insert in partition %d", part_id));
   DBUG_RETURN(m_file[part_id]->write_row(buf));
@@ -1186,33 +2625,46 @@
 
 
 /*
-  Yes, update_row() does what you expect, it updates a row. old_data will
-  have the previous row record in it, while new_data will have the newest
-  data in it.
-  Keep in mind that the server can do updates based on ordering if an
-  ORDER BY clause was used. Consecutive ordering is not guarenteed.
-
-  Currently new_data will not have an updated auto_increament record, or
-  and updated timestamp field. You can do these for partition by doing these:
-  if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
-    table->timestamp_field->set_time();
-  if (table->next_number_field && record == table->record[0])
-    update_auto_increment();
-
-  Called from sql_select.cc, sql_acl.cc, sql_update.cc, and sql_insert.cc.
-  new_data is always record[0]
-  old_data is normally record[1] but may be anything
+  Update an existing row
 
+  SYNOPSIS
+    update_row()
+    old_data                 Old record in MySQL Row Format
+    new_data                 New record in MySQL Row Format
+
+  RETURN VALUE
+    >0                         Error code
+    0                          Success
+
+  DESCRIPTION
+    Yes, update_row() does what you expect, it updates a row. old_data will
+    have the previous row record in it, while new_data will have the newest
+    data in it.
+    Keep in mind that the server can do updates based on ordering if an
+    ORDER BY clause was used. Consecutive ordering is not guaranteed.
+
+    Currently new_data will not have an updated auto_increment record, or
+    an updated timestamp field. You can do these for partition by doing these:
+    if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
+      table->timestamp_field->set_time();
+    if (table->next_number_field && record == table->record[0])
+      update_auto_increment();
+
+    Called from sql_select.cc, sql_acl.cc, sql_update.cc, and sql_insert.cc.
+    new_data is always record[0]
+    old_data is normally record[1] but may be anything
 */
 
 int ha_partition::update_row(const byte *old_data, byte *new_data)
 {
   uint32 new_part_id, old_part_id;
   int error;
+  longlong func_value;
   DBUG_ENTER("ha_partition::update_row");
 
   if ((error= get_parts_for_update(old_data, new_data, table->record[0],
-                                  m_part_info, &old_part_id, &new_part_id)))
+                                   m_part_info, &old_part_id, &new_part_id,
+                                   &func_value)))
   {
     DBUG_RETURN(error);
   }
@@ -1247,21 +2699,31 @@
 
 
 /*
-  This will delete a row. buf will contain a copy of the row to be deleted.
-  The server will call this right after the current row has been read
-  (from either a previous rnd_xxx() or index_xxx() call).
-  If you keep a pointer to the last row or can access a primary key it will
-  make doing the deletion quite a bit easier.
-  Keep in mind that the server does no guarentee consecutive deletions.
-  ORDER BY clauses can be used.
-
-  Called in sql_acl.cc and sql_udf.cc to manage internal table information.
-  Called in sql_delete.cc, sql_insert.cc, and sql_select.cc. In sql_select
-  it is used for removing duplicates while in insert it is used for REPLACE
-  calls.
+  Remove an existing row
 
-  buf is either record[0] or record[1]
+  SYNOPSIS
+    delete_row
+    buf                      Deleted row in MySQL Row Format
+
+  RETURN VALUE
+    >0                       Error Code
+    0                        Success
+
+  DESCRIPTION
+    This will delete a row. buf will contain a copy of the row to be deleted.
+    The server will call this right after the current row has been read
+    (from either a previous rnd_xxx() or index_xxx() call).
+    If you keep a pointer to the last row or can access a primary key it will
+    make doing the deletion quite a bit easier.
+    Keep in mind that the server does not guarantee consecutive deletions.
+    ORDER BY clauses can be used.
+
+    Called in sql_acl.cc and sql_udf.cc to manage internal table information.
+    Called in sql_delete.cc, sql_insert.cc, and sql_select.cc. In sql_select
+    it is used for removing duplicates while in insert it is used for REPLACE
+    calls.
 
+    buf is either record[0] or record[1]
 */
 
 int ha_partition::delete_row(const byte *buf)
@@ -1280,15 +2742,25 @@
 
 
 /*
-  Used to delete all rows in a table. Both for cases of truncate and
-  for cases where the optimizer realizes that all rows will be
-  removed as a result of a SQL statement.
-
-  Called from item_sum.cc by Item_func_group_concat::clear(),
-  Item_sum_count_distinct::clear(), and Item_func_group_concat::clear().
-  Called from sql_delete.cc by mysql_delete().
-  Called from sql_select.cc by JOIN::reinit().
-  Called from sql_union.cc by st_select_lex_unit::exec().
+  Delete all rows in a table
+
+  SYNOPSIS
+    delete_all_rows()
+
+  RETURN VALUE
+    >0                       Error Code
+    0                        Success
+
+  DESCRIPTION
+    Used to delete all rows in a table. Both for cases of truncate and
+    for cases where the optimizer realizes that all rows will be
+    removed as a result of a SQL statement.
+
+    Called from item_sum.cc by Item_func_group_concat::clear(),
+    Item_sum_count_distinct::clear(), and Item_func_group_concat::clear().
+    Called from sql_delete.cc by mysql_delete().
+    Called from sql_select.cc by JOIN::reinit().
+    Called from sql_union.cc by st_select_lex_unit::exec().
 */
 
 int ha_partition::delete_all_rows()
@@ -1296,6 +2768,7 @@
   int error;
   handler **file;
   DBUG_ENTER("ha_partition::delete_all_rows");
+
   file= m_file;
   do
   {
@@ -1305,14 +2778,26 @@
   DBUG_RETURN(0);
 }
 
+
 /*
-  rows == 0 means we will probably insert many rows
+  Start a large batch of insert rows
+
+  SYNOPSIS
+    start_bulk_insert()
+    rows                  Number of rows to insert
+
+  RETURN VALUE
+    NONE
+
+  DESCRIPTION
+    rows == 0 means we will probably insert many rows
 */
 
 void ha_partition::start_bulk_insert(ha_rows rows)
 {
   handler **file;
   DBUG_ENTER("ha_partition::start_bulk_insert");
+
   if (!rows)
   {
     /* Avoid allocation big caches in all underlaying handlers */
@@ -1328,6 +2813,17 @@
 }
 
 
+/*
+  Finish a large batch of insert rows
+
+  SYNOPSIS
+    end_bulk_insert()
+
+  RETURN VALUE
+    >0                      Error code
+    0                       Success
+*/
+
 int ha_partition::end_bulk_insert()
 {
   int error= 0;
@@ -1345,6 +2841,7 @@
   DBUG_RETURN(error);
 }
 
+
 /****************************************************************************
                 MODULE full table scan
 ****************************************************************************/
@@ -1356,18 +2853,22 @@
     scan	0  Initialize for random reads through rnd_pos()
 		1  Initialize for random scan through rnd_next()
 
-  NOTES
-  rnd_init() is called when the server wants the storage engine to do a
-  table scan or when the server wants to access data through rnd_pos.
-
-  When scan is used we will scan one handler partition at a time.
-  When preparing for rnd_pos we will init all handler partitions.
-  No extra cache handling is needed when scannning is not performed.
-
-  Before initialising we will call rnd_end to ensure that we clean up from
-  any previous incarnation of a table scan.
-  Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc,
-  sql_table.cc, and sql_update.cc.
+  RETURN VALUE
+    >0          Error code
+    0           Success
+
+  DESCRIPTION 
+    rnd_init() is called when the server wants the storage engine to do a
+    table scan or when the server wants to access data through rnd_pos.
+
+    When scan is used we will scan one handler partition at a time.
+    When preparing for rnd_pos we will init all handler partitions.
+    No extra cache handling is needed when scanning is not performed.
+
+    Before initialising we will call rnd_end to ensure that we clean up from
+    any previous incarnation of a table scan.
+    Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc,
+    sql_table.cc, and sql_update.cc.
 */
 
 int ha_partition::rnd_init(bool scan)
@@ -1421,10 +2922,22 @@
 }
 
 
+/*
+  End of a table scan
+
+  SYNOPSIS
+    rnd_end()
+
+  RETURN VALUE
+    >0          Error code
+    0           Success
+*/
+
 int ha_partition::rnd_end()
 {
   handler **file;
   DBUG_ENTER("ha_partition::rnd_end");
+
   switch (m_scan_value) {
   case 2:                                       // Error
     break;
@@ -1456,18 +2969,22 @@
     rnd_next()
     buf		buffer that should be filled with data
 
-  This is called for each row of the table scan. When you run out of records
-  you should return HA_ERR_END_OF_FILE.
-  The Field structure for the table is the key to getting data into buf
-  in a manner that will allow the server to understand it.
+  RETURN VALUE
+    >0          Error code
+    0           Success
+
+  DESCRIPTION
+    This is called for each row of the table scan. When you run out of records
+    you should return HA_ERR_END_OF_FILE.
+    The Field structure for the table is the key to getting data into buf
+    in a manner that will allow the server to understand it.
 
-  Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc,
-  sql_table.cc, and sql_update.cc.
+    Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc,
+    sql_table.cc, and sql_update.cc.
 */
 
 int ha_partition::rnd_next(byte *buf)
 {
-  DBUG_ASSERT(m_scan_value);
   uint part_id= m_part_spec.start_part;         // Cache of this variable
   handler *file= m_file[part_id];
   int result= HA_ERR_END_OF_FILE;
@@ -1526,37 +3043,38 @@
 }
 
 
-inline void store_part_id_in_pos(byte *pos, uint part_id)
-{
-  int2store(pos, part_id);
-}
+/*
+  Save position of current row
 
-inline uint get_part_id_from_pos(const byte *pos)
-{
-  return uint2korr(pos);
-}
+  SYNOPSIS
+    position()
+    record             Current record in MySQL Row Format
 
-/*
-  position() is called after each call to rnd_next() if the data needs
-  to be ordered. You can do something like the following to store
-  the position:
-  ha_store_ptr(ref, ref_length, current_position);
+  RETURN VALUE
+    NONE
 
-  The server uses ref to store data. ref_length in the above case is
-  the size needed to store current_position. ref is just a byte array
-  that the server will maintain. If you are using offsets to mark rows, then
-  current_position should be the offset. If it is a primary key like in
-  BDB, then it needs to be a primary key.
+  DESCRIPTION
+    position() is called after each call to rnd_next() if the data needs
+    to be ordered. You can do something like the following to store
+    the position:
+    ha_store_ptr(ref, ref_length, current_position);
+
+    The server uses ref to store data. ref_length in the above case is
+    the size needed to store current_position. ref is just a byte array
+    that the server will maintain. If you are using offsets to mark rows, then
+    current_position should be the offset. If it is a primary key like in
+    BDB, then it needs to be a primary key.
 
-  Called from filesort.cc, sql_select.cc, sql_delete.cc and sql_update.cc.
+    Called from filesort.cc, sql_select.cc, sql_delete.cc and sql_update.cc.
 */
 
 void ha_partition::position(const byte *record)
 {
   handler *file= m_file[m_last_part];
   DBUG_ENTER("ha_partition::position");
+
   file->position(record);
-  store_part_id_in_pos(ref, m_last_part);
+  int2store(ref, m_last_part);
   memcpy((ref + PARTITION_BYTES_IN_POS), file->ref,
 	 (ref_length - PARTITION_BYTES_IN_POS));
 
@@ -1569,12 +3087,24 @@
 }
 
 /*
-  This is like rnd_next, but you are given a position to use
-  to determine the row. The position will be of the type that you stored in
-  ref. You can use ha_get_ptr(pos,ref_length) to retrieve whatever key
-  or position you saved when position() was called.
-  Called from filesort.cc records.cc sql_insert.cc sql_select.cc
-  sql_update.cc.
+  Read row using position
+
+  SYNOPSIS
+    rnd_pos()
+    out:buf                     Row read in MySQL Row Format
+    position                    Position of read row
+
+  RETURN VALUE
+    >0                          Error code
+    0                           Success
+
+  DESCRIPTION
+    This is like rnd_next, but you are given a position to use
+    to determine the row. The position will be of the type that you stored in
+    ref. You can use ha_get_ptr(pos,ref_length) to retrieve whatever key
+    or position you saved when position() was called.
+    Called from filesort.cc records.cc sql_insert.cc sql_select.cc
+    sql_update.cc.
 */
 
 int ha_partition::rnd_pos(byte * buf, byte *pos)
@@ -1583,7 +3113,7 @@
   handler *file;
   DBUG_ENTER("ha_partition::rnd_pos");
 
-  part_id= get_part_id_from_pos((const byte *) pos);
+  part_id= uint2korr((const byte *) pos);
   DBUG_ASSERT(part_id < m_tot_parts);
   file= m_file[part_id];
   m_last_part= part_id;
@@ -1611,8 +3141,20 @@
 */
 
 /*
-  index_init is always called before starting index scans (except when
-  starting through index_read_idx and using read_range variants).
+  Initialise handler before start of index scan
+
+  SYNOPSIS
+    index_init()
+    inx                Index number
+    sorted             Is rows to be returned in sorted order
+
+  RETURN VALUE
+    >0                 Error code
+    0                  Success
+
+  DESCRIPTION
+    index_init is always called before starting index scans (except when
+    starting through index_read_idx and using read_range variants).
 */
 
 int ha_partition::index_init(uint inx, bool sorted)
@@ -1643,8 +3185,18 @@
 
 
 /*
-  index_end is called at the end of an index scan to clean up any
-  things needed to clean up.
+  End of index scan
+
+  SYNOPSIS
+    index_end()
+
+  RETURN VALUE
+    >0                 Error code
+    0                  Success
+
+  DESCRIPTION
+    index_end is called at the end of an index scan to clean up any
+    things needed to clean up.
 */
 
 int ha_partition::index_end()
@@ -1669,25 +3221,49 @@
 
 
 /*
-  index_read starts a new index scan using a start key. The MySQL Server
-  will check the end key on its own. Thus to function properly the
-  partitioned handler need to ensure that it delivers records in the sort
-  order of the MySQL Server.
-  index_read can be restarted without calling index_end on the previous
-  index scan and without calling index_init. In this case the index_read
-  is on the same index as the previous index_scan. This is particularly
-  used in conjuntion with multi read ranges.
+  Read one record in an index scan and start an index scan
+
+  SYNOPSIS
+    index_read()
+    buf                    Read row in MySQL Row Format
+    key                    Key parts in consecutive order
+    key_len                Total length of key parts
+    find_flag              What type of key condition is used
+
+  RETURN VALUE
+    >0                 Error code
+    0                  Success
+
+  DESCRIPTION
+    index_read starts a new index scan using a start key. The MySQL Server
+    will check the end key on its own. Thus to function properly the
+    partitioned handler need to ensure that it delivers records in the sort
+    order of the MySQL Server.
+    index_read can be restarted without calling index_end on the previous
+    index scan and without calling index_init. In this case the index_read
+    is on the same index as the previous index_scan. This is particularly
+    used in conjunction with multi read ranges.
 */
 
 int ha_partition::index_read(byte * buf, const byte * key,
 			     uint key_len, enum ha_rkey_function find_flag)
 {
   DBUG_ENTER("ha_partition::index_read");
+
   end_range= 0;
   DBUG_RETURN(common_index_read(buf, key, key_len, find_flag));
 }
 
 
+/*
+  Common routine for a number of index_read variants
+
+  SYNOPSIS
+    common_index_read
+  
+  see index_read for rest
+*/
+
 int ha_partition::common_index_read(byte *buf, const byte *key, uint key_len,
 				    enum ha_rkey_function find_flag)
 {
@@ -1736,18 +3312,30 @@
 
 
 /*
-  index_first() asks for the first key in the index.
-  This is similar to index_read except that there is no start key since
-  the scan starts from the leftmost entry and proceeds forward with
-  index_next.
+  Start an index scan from leftmost record and return first record
+
+  SYNOPSIS
+    index_first()
+    buf                 Read row in MySQL Row Format
 
-  Called from opt_range.cc, opt_sum.cc, sql_handler.cc,
-  and sql_select.cc.
+  RETURN VALUE
+    >0                  Error code
+    0                   Success
+
+  DESCRIPTION
+    index_first() asks for the first key in the index.
+    This is similar to index_read except that there is no start key since
+    the scan starts from the leftmost entry and proceeds forward with
+    index_next.
+
+    Called from opt_range.cc, opt_sum.cc, sql_handler.cc,
+    and sql_select.cc.
 */
 
 int ha_partition::index_first(byte * buf)
 {
   DBUG_ENTER("ha_partition::index_first");
+
   end_range= 0;
   m_index_scan_type= partition_index_first;
   DBUG_RETURN(common_first_last(buf));
@@ -1755,25 +3343,47 @@
 
 
 /*
-  index_last() asks for the last key in the index.
-  This is similar to index_read except that there is no start key since
-  the scan starts from the rightmost entry and proceeds forward with
-  index_prev.
+  Start an index scan from rightmost record and return first record
+  
+  SYNOPSIS
+    index_last()
+    buf                 Read row in MySQL Row Format
+
+  RETURN VALUE
+    >0                  Error code
+    0                   Success
+
+  DESCRIPTION
+    index_last() asks for the last key in the index.
+    This is similar to index_read except that there is no start key since
+    the scan starts from the rightmost entry and proceeds forward with
+    index_prev.
 
-  Called from opt_range.cc, opt_sum.cc, sql_handler.cc,
-  and sql_select.cc.
+    Called from opt_range.cc, opt_sum.cc, sql_handler.cc,
+    and sql_select.cc.
 */
 
 int ha_partition::index_last(byte * buf)
 {
   DBUG_ENTER("ha_partition::index_last");
+
   m_index_scan_type= partition_index_last;
   DBUG_RETURN(common_first_last(buf));
 }
 
+/*
+  Common routine for index_first/index_last
+
+  SYNOPSIS
+    common_first_last()
+
+  see index_first for rest
+*/
+
 int ha_partition::common_first_last(byte *buf)
 {
   int error;
+
   if ((error= partition_scan_set_up(buf, FALSE)))
     return error;
   if (!m_ordered_scan_ongoing)
@@ -1781,10 +3391,18 @@
   return handle_ordered_index_scan(buf);
 }
 
+
 /*
-  Positions an index cursor to the index specified in key. Fetches the
-  row if any.  This is only used to read whole keys.
-  TODO: Optimise this code to avoid index_init and index_end
+  Perform index read using index where always only one row is returned
+
+  SYNOPSIS
+    index_read_idx()
+    see index_read for rest of parameters and return values
+
+  DESCRIPTION
+    Positions an index cursor to the index specified in key. Fetches the
+    row if any.  This is only used to read whole keys.
+    TODO: Optimise this code to avoid index_init and index_end
 */
 
 int ha_partition::index_read_idx(byte * buf, uint index, const byte * key,
@@ -1793,32 +3411,60 @@
 {
   int res;
   DBUG_ENTER("ha_partition::index_read_idx");
+
   index_init(index, 0);
   res= index_read(buf, key, key_len, find_flag);
   index_end();
   DBUG_RETURN(res);
 }
 
+
 /*
-  This is used in join_read_last_key to optimise away an ORDER BY.
-  Can only be used on indexes supporting HA_READ_ORDER
+  Read last using key
+
+  SYNOPSIS
+    index_read_last()
+    buf                   Read row in MySQL Row Format
+    key                   Key
+    keylen                Length of key
+
+  RETURN VALUE
+    >0                    Error code
+    0                     Success
+
+  DESCRIPTION
+    This is used in join_read_last_key to optimise away an ORDER BY.
+    Can only be used on indexes supporting HA_READ_ORDER
 */
 
 int ha_partition::index_read_last(byte *buf, const byte *key, uint keylen)
 {
   DBUG_ENTER("ha_partition::index_read_last");
+
   m_ordered= TRUE;				// Safety measure
   DBUG_RETURN(index_read(buf, key, keylen, HA_READ_PREFIX_LAST));
 }
 
 
 /*
-  Used to read forward through the index.
+  Read next record in a forward index scan
+
+  SYNOPSIS
+    index_next()
+    buf                   Read row in MySQL Row Format
+
+  RETURN VALUE
+    >0                    Error code
+    0                     Success
+
+  DESCRIPTION
+    Used to read forward through the index.
 */
 
 int ha_partition::index_next(byte * buf)
 {
   DBUG_ENTER("ha_partition::index_next");
+
   /*
     TODO(low priority):
     If we want partition to work with the HANDLER commands, we
@@ -1834,13 +3480,27 @@
 
 
 /*
-  This routine is used to read the next but only if the key is the same
-  as supplied in the call.
+  Read next record if key is the same as the supplied key
+
+  SYNOPSIS
+    index_next_same()
+    buf                   Read row in MySQL Row Format
+    key                   Key
+    keylen                Length of key
+
+  RETURN VALUE
+    >0                    Error code
+    0                     Success
+
+  DESCRIPTION
+    This routine is used to read the next but only if the key is the same
+    as supplied in the call.
 */
 
 int ha_partition::index_next_same(byte *buf, const byte *key, uint keylen)
 {
   DBUG_ENTER("ha_partition::index_next_same");
+
   DBUG_ASSERT(keylen == m_start_key.length);
   DBUG_ASSERT(m_index_scan_type != partition_index_last);
   if (!m_ordered_scan_ongoing)
@@ -1848,13 +3508,26 @@
   DBUG_RETURN(handle_ordered_next(buf, TRUE));
 }
 
+
 /*
-  Used to read backwards through the index.
+  Read next record when performing index scan backwards
+
+  SYNOPSIS
+    index_prev()
+    buf                   Read row in MySQL Row Format
+
+  RETURN VALUE
+    >0                    Error code
+    0                     Success
+
+  DESCRIPTION
+    Used to read backwards through the index.
 */
 
 int ha_partition::index_prev(byte * buf)
 {
   DBUG_ENTER("ha_partition::index_prev");
+
   /* TODO: read comment in index_next */
   DBUG_ASSERT(m_index_scan_type != partition_index_first);
   DBUG_RETURN(handle_ordered_prev(buf));
@@ -1862,10 +3535,24 @@
 
 
 /*
-  We reimplement read_range_first since we don't want the compare_key
-  check at the end. This is already performed in the partition handler.
-  read_range_next is very much different due to that we need to scan
-  all underlying handlers.
+  Start a read of one range with start and end key
+
+  SYNOPSIS
+    read_range_first()
+    start_key           Specification of start key
+    end_key             Specification of end key
+    eq_range_arg        TRUE if this is an equality range
+    sorted              Should records be returned in sorted order
+
+  RETURN VALUE
+    >0                    Error code
+    0                     Success
+
+  DESCRIPTION
+    We reimplement read_range_first since we don't want the compare_key
+    check at the end. This is already performed in the partition handler.
+    read_range_next is very much different due to that we need to scan
+    all underlying handlers.
 */
 
 int ha_partition::read_range_first(const key_range *start_key,
@@ -1874,6 +3561,7 @@
 {
   int error;
   DBUG_ENTER("ha_partition::read_range_first");
+
   m_ordered= sorted;
   eq_range= eq_range_arg;
   end_range= 0;
@@ -1902,9 +3590,21 @@
 }
 
 
+/*
+  Read next record in read of a range with start and end key
+
+  SYNOPSIS
+    read_range_next()
+
+  RETURN VALUE
+    >0                    Error code
+    0                     Success
+*/
+
 int ha_partition::read_range_next()
 {
   DBUG_ENTER("ha_partition::read_range_next");
+
   if (m_ordered)
   {
     DBUG_RETURN(handler::read_range_next());
@@ -1913,6 +3613,22 @@
 }
 
 
+/*
+  Common routine to set up scans
+
+  SYNOPSIS
+    buf                  Buffer to later return record in
+    idx_read_flag        Is it index scan
+
+  RETURN VALUE
+    >0                    Error code
+    0                     Success
+
+  DESCRIPTION
+    This is where we check which partitions to actually scan if not all
+    of them
+*/
+
 int ha_partition::partition_scan_set_up(byte * buf, bool idx_read_flag)
 {
   DBUG_ENTER("ha_partition::partition_scan_set_up");
@@ -1957,16 +3673,29 @@
   Unordered Index Scan Routines
 ****************************************************************************/
 /*
-  These routines are used to scan partitions without considering order.
-  This is performed in two situations.
-  1) In read_multi_range this is the normal case
-  2) When performing any type of index_read, index_first, index_last where
-  all fields in the partition function is bound. In this case the index
-  scan is performed on only one partition and thus it isn't necessary to
-  perform any sort.
+  Common routine to handle index_next with unordered results
+
+  SYNOPSIS
+    handle_unordered_next()
+    out:buf                       Read row in MySQL Row Format
+    next_same                     Called from index_next_same
+
+  RETURN VALUE
+    HA_ERR_END_OF_FILE            End of scan
+    0                             Success
+    other                         Error code
+
+  DESCRIPTION
+    These routines are used to scan partitions without considering order.
+    This is performed in two situations.
+    1) In read_multi_range this is the normal case
+    2) When performing any type of index_read, index_first, index_last where
+    all fields in the partition function are bound. In this case the index
+    scan is performed on only one partition and thus it isn't necessary to
+    perform any sort.
 */
 
-int ha_partition::handle_unordered_next(byte *buf, bool next_same)
+int ha_partition::handle_unordered_next(byte *buf, bool is_next_same)
 {
   handler *file= file= m_file[m_part_spec.start_part];
   int error;
@@ -1976,7 +3705,7 @@
     We should consider if this should be split into two functions as
     next_same is alwas a local constant
   */
-  if (next_same)
+  if (is_next_same)
   {
     if (!(error= file->index_next_same(buf, m_start_key.key,
                                        m_start_key.length)))
@@ -2005,8 +3734,20 @@
 
 
 /*
-  This routine is used to start the index scan on the next partition.
-  Both initial start and after completing scan on one partition.
+  Handle index_next when changing to new partition
+
+  SYNOPSIS
+    handle_unordered_scan_next_partition()
+    buf                       Read row in MySQL Row Format
+
+  RETURN VALUE
+    HA_ERR_END_OF_FILE            End of scan
+    0                             Success
+    other                         Error code
+
+  DESCRIPTION
+    This routine is used to start the index scan on the next partition.
+    Both initial start and after completing scan on one partition.
 */
 
 int ha_partition::handle_unordered_scan_next_partition(byte * buf)
@@ -2054,30 +3795,43 @@
 
 
 /*
-  This part contains the logic to handle index scans that require ordered
-  output. This includes all except those started by read_range_first with
-  the flag ordered set to FALSE. Thus most direct index_read and all
-  index_first and index_last.
-
-  We implement ordering by keeping one record plus a key buffer for each
-  partition. Every time a new entry is requested we will fetch a new
-  entry from the partition that is currently not filled with an entry.
-  Then the entry is put into its proper sort position.
-
-  Returning a record is done by getting the top record, copying the
-  record to the request buffer and setting the partition as empty on
-  entries.
+  Common routine to start index scan with ordered results
+
+  SYNOPSIS
+    handle_ordered_index_scan()
+    out:buf                       Read row in MySQL Row Format
+
+  RETURN VALUE
+    HA_ERR_END_OF_FILE            End of scan
+    0                             Success
+    other                         Error code
+
+  DESCRIPTION
+    This part contains the logic to handle index scans that require ordered
+    output. This includes all except those started by read_range_first with
+    the flag ordered set to FALSE. Thus most direct index_read and all
+    index_first and index_last.
+
+    We implement ordering by keeping one record plus a key buffer for each
+    partition. Every time a new entry is requested we will fetch a new
+    entry from the partition that is currently not filled with an entry.
+    Then the entry is put into its proper sort position.
+
+    Returning a record is done by getting the top record, copying the
+    record to the request buffer and setting the partition as empty on
+    entries.
 */
 
 int ha_partition::handle_ordered_index_scan(byte *buf)
 {
-  uint i, j= 0;
+  uint i;
+  uint j= 0;
   bool found= FALSE;
   bool reverse_order= FALSE;
   DBUG_ENTER("ha_partition::handle_ordered_index_scan");
 
   m_top_entry= NO_CURRENT_PART_ID;
-  queue_remove_all(&queue);
+  queue_remove_all(&m_queue);
   for (i= m_part_spec.start_part; i <= m_part_spec.end_part; i++)
   {
     int error;
@@ -2110,7 +3864,7 @@
       /*
         Initialise queue without order first, simply insert
       */
-      queue_element(&queue, j++)= (byte*)queue_buf(i);
+      queue_element(&m_queue, j++)= (byte*)queue_buf(i);
     }
     else if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
     {
@@ -2123,10 +3877,10 @@
       We found at least one partition with data, now sort all entries and
       after that read the first entry and copy it to the buffer to return in.
     */
-    queue_set_max_at_top(&queue, reverse_order);
-    queue_set_cmp_arg(&queue, (void*)m_curr_key_info);
-    queue.elements= j;
-    queue_fix(&queue);
+    queue_set_max_at_top(&m_queue, reverse_order);
+    queue_set_cmp_arg(&m_queue, (void*)m_curr_key_info);
+    m_queue.elements= j;
+    queue_fix(&m_queue);
     return_top_record(buf);
     DBUG_PRINT("info", ("Record returned from partition %d", m_top_entry));
     DBUG_RETURN(0);
@@ -2135,11 +3889,23 @@
 }
 
 
+/*
+  Return the top record in sort order
+
+  SYNOPSIS
+    return_top_record()
+    out:buf                  Row returned in MySQL Row Format
+
+  RETURN VALUE
+    NONE
+*/
+
 void ha_partition::return_top_record(byte *buf)
 {
   uint part_id;
-  byte *key_buffer= queue_top(&queue);
+  byte *key_buffer= queue_top(&m_queue);
   byte *rec_buffer= key_buffer + PARTITION_BYTES_IN_POS;
+
   part_id= uint2korr(key_buffer);
   memcpy(buf, rec_buffer, m_rec_length);
   m_last_part= part_id;
@@ -2147,14 +3913,28 @@
 }
 
 
-int ha_partition::handle_ordered_next(byte *buf, bool next_same)
+/*
+  Common routine to handle index_next with ordered results
+
+  SYNOPSIS
+    handle_ordered_next()
+    out:buf                       Read row in MySQL Row Format
+    next_same                     Called from index_next_same
+
+  RETURN VALUE
+    HA_ERR_END_OF_FILE            End of scan
+    0                             Success
+    other                         Error code
+*/
+
+int ha_partition::handle_ordered_next(byte *buf, bool is_next_same)
 {
   int error;
   uint part_id= m_top_entry;
   handler *file= m_file[part_id];
   DBUG_ENTER("ha_partition::handle_ordered_next");
 
-  if (!next_same)
+  if (!is_next_same)
     error= file->index_next(rec_buf(part_id));
   else
     error= file->index_next_same(rec_buf(part_id), m_start_key.key,
@@ -2164,8 +3944,8 @@
     if (error == HA_ERR_END_OF_FILE)
     {
       /* Return next buffered row */
-      queue_remove(&queue, (uint) 0);
-      if (queue.elements)
+      queue_remove(&m_queue, (uint) 0);
+      if (m_queue.elements)
       {
          DBUG_PRINT("info", ("Record returned from partition %u (2)",
                      m_top_entry));
@@ -2175,25 +3955,39 @@
     }
     DBUG_RETURN(error);
   }
-  queue_replaced(&queue);
+  queue_replaced(&m_queue);
   return_top_record(buf);
   DBUG_PRINT("info", ("Record returned from partition %u", m_top_entry));
   DBUG_RETURN(0);
 }
 
 
+/*
+  Common routine to handle index_prev with ordered results
+
+  SYNOPSIS
+    handle_ordered_prev()
+    out:buf                       Read row in MySQL Row Format
+
+  RETURN VALUE
+    HA_ERR_END_OF_FILE            End of scan
+    0                             Success
+    other                         Error code
+*/
+
 int ha_partition::handle_ordered_prev(byte *buf)
 {
   int error;
   uint part_id= m_top_entry;
   handler *file= m_file[part_id];
   DBUG_ENTER("ha_partition::handle_ordered_prev");
+
   if ((error= file->index_prev(rec_buf(part_id))))
   {
     if (error == HA_ERR_END_OF_FILE)
     {
-      queue_remove(&queue, (uint) 0);
-      if (queue.elements)
+      queue_remove(&m_queue, (uint) 0);
+      if (m_queue.elements)
       {
 	return_top_record(buf);
 	DBUG_PRINT("info", ("Record returned from partition %d (2)",
@@ -2203,17 +3997,34 @@
     }
     DBUG_RETURN(error);
   }
-  queue_replaced(&queue);
+  queue_replaced(&m_queue);
   return_top_record(buf);
   DBUG_PRINT("info", ("Record returned from partition %d", m_top_entry));
   DBUG_RETURN(0);
 }
 
 
+/*
+  Set fields in partition functions in read set for underlying handlers
+
+  SYNOPSIS
+    include_partition_fields_in_used_fields()
+
+  RETURN VALUE
+    NONE
+
+  DESCRIPTION
+    Some handlers only read fields as specified by the bitmap for the
+    read set. For partitioned handlers we always require that the
+    fields of the partition functions are read such that we can
+    calculate the partition id to place updated and deleted records.
+*/
+
 void ha_partition::include_partition_fields_in_used_fields()
 {
-  DBUG_ENTER("ha_partition::include_partition_fields_in_used_fields");
   Field **ptr= m_part_field_array;
+  DBUG_ENTER("ha_partition::include_partition_fields_in_used_fields");
+
   do
   {
     ha_set_bit_in_read_set((*ptr)->fieldnr);
@@ -2232,57 +4043,68 @@
 */
 
 /*
-  ::info() is used to return information to the optimizer.
-  Currently this table handler doesn't implement most of the fields
-  really needed. SHOW also makes use of this data
-  Another note, if your handler doesn't proved exact record count,
-  you will probably want to have the following in your code:
-  if (records < 2)
-    records = 2;
-  The reason is that the server will optimize for cases of only a single
-  record. If in a table scan you don't know the number of records
-  it will probably be better to set records to two so you can return
-  as many records as you need.
-
-  Along with records a few more variables you may wish to set are:
-    records
-    deleted
-    data_file_length
-    index_file_length
-    delete_length
-    check_time
-  Take a look at the public variables in handler.h for more information.
-
-  Called in:
-    filesort.cc
-    ha_heap.cc
-    item_sum.cc
-    opt_sum.cc
-    sql_delete.cc
-    sql_delete.cc
-    sql_derived.cc
-    sql_select.cc
-    sql_select.cc
-    sql_select.cc
-    sql_select.cc
-    sql_select.cc
-    sql_show.cc
-    sql_show.cc
-    sql_show.cc
-    sql_show.cc
-    sql_table.cc
-    sql_union.cc
-    sql_update.cc
-
-  Some flags that are not implemented
-    HA_STATUS_POS:
-      This parameter is never used from the MySQL Server. It is checked in a
-      place in MyISAM so could potentially be used by MyISAM specific programs.
-    HA_STATUS_NO_LOCK:
-    This is declared and often used. It's only used by MyISAM.
-    It means that MySQL doesn't need the absolute latest statistics
-    information. This may save the handler from doing internal locks while
-    retrieving statistics data.
+  General method to gather info from handler
+
+  SYNOPSIS
+    info()
+    flag              Specifies what info is requested
+
+  RETURN VALUE
+    NONE
+
+  DESCRIPTION
+    ::info() is used to return information to the optimizer.
+    Currently this table handler doesn't implement most of the fields
+    really needed. SHOW also makes use of this data
+    Another note, if your handler doesn't proved exact record count,
+    you will probably want to have the following in your code:
+    if (records < 2)
+      records = 2;
+    The reason is that the server will optimize for cases of only a single
+    record. If in a table scan you don't know the number of records
+    it will probably be better to set records to two so you can return
+    as many records as you need.
+
+    Along with records a few more variables you may wish to set are:
+      records
+      deleted
+      data_file_length
+      index_file_length
+      delete_length
+      check_time
+    Take a look at the public variables in handler.h for more information.
+
+    Called in:
+      filesort.cc
+      ha_heap.cc
+      item_sum.cc
+      opt_sum.cc
+      sql_delete.cc
+      sql_delete.cc
+      sql_derived.cc
+      sql_select.cc
+      sql_select.cc
+      sql_select.cc
+      sql_select.cc
+      sql_select.cc
+      sql_show.cc
+      sql_show.cc
+      sql_show.cc
+      sql_show.cc
+      sql_table.cc
+      sql_union.cc
+      sql_update.cc
+
+    Some flags that are not implemented
+      HA_STATUS_POS:
+        This parameter is never used from the MySQL Server. It is checked in a
+        place in MyISAM so could potentially be used by MyISAM specific
+        programs.
+      HA_STATUS_NO_LOCK:
+        This is declared and often used. It's only used by MyISAM.
+        It means that MySQL doesn't need the absolute latest statistics
+        information. This may save the handler from doing internal locks while
+        retrieving statistics data.
 */
 
 void ha_partition::info(uint flag)
@@ -2443,7 +4265,41 @@
 }
 
 
+void ha_partition::get_dynamic_partition_info(PARTITION_INFO *stat_info,
+                                              uint part_id)
+{
+  handler *file= m_file[part_id];
+  file->info(HA_STATUS_CONST | HA_STATUS_TIME | HA_STATUS_VARIABLE |
+             HA_STATUS_NO_LOCK);
+
+  stat_info->records= file->records;
+  stat_info->mean_rec_length= file->mean_rec_length;
+  stat_info->data_file_length= file->data_file_length;
+  stat_info->max_data_file_length= file->max_data_file_length;
+  stat_info->index_file_length= file->index_file_length;
+  stat_info->delete_length= file->delete_length;
+  stat_info->create_time= file->create_time;
+  stat_info->update_time= file->update_time;
+  stat_info->check_time= file->check_time;
+  stat_info->check_sum= 0;
+  if (file->table_flags() & (ulong) HA_HAS_CHECKSUM)
+    stat_info->check_sum= file->checksum();
+  return;
+}
+
+
 /*
+  General function to prepare handler for certain behavior
+
+  SYNOPSIS
+    extra()
+    operation              Operation type for extra call
+
+  RETURN VALUE
+    >0                     Error code
+    0                      Success
+
+  DESCRIPTION
   extra() is called whenever the server wishes to send a hint to
   the storage engine. The MyISAM engine implements the most hints.
 
@@ -2789,8 +4645,18 @@
 
 
 /*
-  This will in the future be called instead of extra(HA_EXTRA_RESET) as this
-  is such a common call
+  Special extra call to reset extra parameters
+
+  SYNOPSIS
+    reset()
+
+  RETURN VALUE
+    >0                   Error code
+    0                    Success
+
+  DESCRIPTION
+    This will in the future be called instead of extra(HA_EXTRA_RESET) as this
+    is such a common call
 */
 
 int ha_partition::reset(void)
@@ -2798,6 +4664,7 @@
   int result= 0, tmp;
   handler **file;
   DBUG_ENTER("ha_partition::reset");
+
   file= m_file;
   if (m_part_info)
     bitmap_clear_all(&m_part_info->used_partitions);
@@ -2810,15 +4677,40 @@
 }
 
 
+/*
+  Special extra method for HA_EXTRA_CACHE with cachesize as extra parameter
+
+  SYNOPSIS
+    extra_opt()
+    operation                      Must be HA_EXTRA_CACHE
+    cachesize                      Size of cache in full table scan
+
+  RETURN VALUE
+    >0                   Error code
+    0                    Success
+*/
+
 int ha_partition::extra_opt(enum ha_extra_function operation, ulong cachesize)
 {
   DBUG_ENTER("ha_partition::extra_opt()");
+
   DBUG_ASSERT(HA_EXTRA_CACHE == operation);
   prepare_extra_cache(cachesize);
   DBUG_RETURN(0);
 }
 
 
+/*
+  Call extra on handler with HA_EXTRA_CACHE and cachesize
+
+  SYNOPSIS
+    prepare_extra_cache()
+    cachesize                Size of cache for full table scan
+
+  RETURN VALUE
+    NONE
+*/
+
 void ha_partition::prepare_extra_cache(uint cachesize)
 {
   DBUG_ENTER("ha_partition::prepare_extra_cache()");
@@ -2834,11 +4726,24 @@
 }
 
 
+/*
+  Call extra on all partitions
+
+  SYNOPSIS
+    loop_extra()
+    operation             extra operation type
+
+  RETURN VALUE
+    >0                    Error code
+    0                     Success
+*/
+
 int ha_partition::loop_extra(enum ha_extra_function operation)
 {
   int result= 0, tmp;
   handler **file;
   DBUG_ENTER("ha_partition::loop_extra()");
+
   for (file= m_file; *file; file++)
   {
     if ((tmp= (*file)->extra(operation)))
@@ -2848,10 +4753,22 @@
 }
 
 
+/*
+  Call extra(HA_EXTRA_CACHE) on next partition_id
+
+  SYNOPSIS
+    late_extra_cache()
+    partition_id               Partition id to call extra on
+
+  RETURN VALUE
+    NONE
+*/
+
 void ha_partition::late_extra_cache(uint partition_id)
 {
   handler *file;
   DBUG_ENTER("ha_partition::late_extra_cache");
+
   if (!m_extra_cache)
     DBUG_VOID_RETURN;
   file= m_file[partition_id];
@@ -2863,10 +4780,22 @@
 }
 
 
+/*
+  Call extra(HA_EXTRA_NO_CACHE) on next partition_id
+
+  SYNOPSIS
+    late_extra_no_cache()
+    partition_id               Partition id to call extra on
+
+  RETURN VALUE
+    NONE
+*/
+
 void ha_partition::late_extra_no_cache(uint partition_id)
 {
   handler *file;
   DBUG_ENTER("ha_partition::late_extra_no_cache");
+
   if (!m_extra_cache)
     DBUG_VOID_RETURN;
   file= m_file[partition_id];
@@ -2879,12 +4808,34 @@
                 MODULE optimiser support
 ****************************************************************************/
 
+/*
+  Get keys to use for scanning
+
+  SYNOPSIS
+    keys_to_use_for_scanning()
+
+  RETURN VALUE
+    key_map of keys usable for scanning
+*/
+
 const key_map *ha_partition::keys_to_use_for_scanning()
 {
   DBUG_ENTER("ha_partition::keys_to_use_for_scanning");
+
   DBUG_RETURN(m_file[0]->keys_to_use_for_scanning());
 }
 
+
+/*
+  Return time for a scan of the table
+
+  SYNOPSIS
+    scan_time()
+
+  RETURN VALUE
+    time for scan
+*/
+
 double ha_partition::scan_time()
 {
   double scan_time= 0;
@@ -2898,28 +4849,53 @@
 
 
 /*
-  This will be optimised later to include whether or not the index can
-  be used with partitioning. To achieve we need to add another parameter
-  that specifies how many of the index fields that are bound in the ranges.
-  Possibly added as a new call to handlers.
+  Get time to read
+
+  SYNOPSIS
+    read_time()
+    index                Index number used
+    ranges               Number of ranges
+    rows                 Number of rows
+
+  RETURN VALUE
+    time for read
+
+  DESCRIPTION
+    This will be optimised later to include whether or not the index can
+    be used with partitioning. To achieve we need to add another parameter
+    that specifies how many of the index fields that are bound in the ranges.
+    Possibly added as a new call to handlers.
 */
 
 double ha_partition::read_time(uint index, uint ranges, ha_rows rows)
 {
   DBUG_ENTER("ha_partition::read_time");
+
   DBUG_RETURN(m_file[0]->read_time(index, ranges, rows));
 }
 
 /*
-  Given a starting key, and an ending key estimate the number of rows that
-  will exist between the two. end_key may be empty which in case determine
-  if start_key matches any rows.
-
-  Called from opt_range.cc by check_quick_keys().
-
-  monty: MUST be called for each range and added.
-	 Note that MySQL will assume that if this returns 0 there is no
-         matching rows for the range!
+  Find number of records in a range
+
+  SYNOPSIS
+    records_in_range()
+    inx                  Index number
+    min_key              Start of range
+    max_key              End of range
+
+  RETURN VALUE
+    Number of rows in range
+
+  DESCRIPTION
+    Given a starting key, and an ending key estimate the number of rows that
+    will exist between the two. end_key may be empty, in which case we
+    determine if start_key matches any rows.
+
+    Called from opt_range.cc by check_quick_keys().
+
+    monty: MUST be called for each range and added.
+          Note that MySQL will assume that if this returns 0 there is no
+          matching rows for the range!
 */
 
 ha_rows ha_partition::records_in_range(uint inx, key_range *min_key,
@@ -2938,6 +4914,16 @@
 }
 
 
+/*
+  Estimate upper bound of number of rows
+
+  SYNOPSIS
+    estimate_rows_upper_bound()
+
+  RETURN VALUE
+    Number of rows
+*/
+
 ha_rows ha_partition::estimate_rows_upper_bound()
 {
   ha_rows rows, tot_rows= 0;
@@ -2956,9 +4942,48 @@
 }
 
 
+/*
+  Is it ok to switch to a new engine for this table
+
+  SYNOPSIS
+    can_switch_engines()
+
+  RETURN VALUE
+    TRUE                  Ok
+    FALSE                 Not ok
+
+  DESCRIPTION
+    Used to ensure that tables with foreign key constraints are not moved
+    to engines without foreign key support.
+*/
+
+bool ha_partition::can_switch_engines()
+{
+  handler **file;
+  DBUG_ENTER("ha_partition::can_switch_engines");
+ 
+  file= m_file;
+  do
+  {
+    if (!(*file)->can_switch_engines())
+      DBUG_RETURN(FALSE);
+  } while (*(++file));
+  DBUG_RETURN(TRUE);
+}
+
+
+/*
+  Return the table cache type
+
+  SYNOPSIS
+    table_cache_type()
+
+*/
+
 uint8 ha_partition::table_cache_type()
 {
   DBUG_ENTER("ha_partition::table_cache_type");
+
   DBUG_RETURN(m_file[0]->table_cache_type());
 }
 
@@ -2970,6 +4995,7 @@
 const char *ha_partition::index_type(uint inx)
 {
   DBUG_ENTER("ha_partition::index_type");
+
   DBUG_RETURN(m_file[0]->index_type(inx));
 }
 
@@ -2977,8 +5003,11 @@
 void ha_partition::print_error(int error, myf errflag)
 {
   DBUG_ENTER("ha_partition::print_error");
+
   /* Should probably look for my own errors first */
   /* monty: needs to be called for the last used partition ! */
+  DBUG_PRINT("enter", ("error = %d", error));
+
   if (error == HA_ERR_NO_PARTITION_FOUND)
     my_error(ER_NO_PARTITION_FOR_GIVEN_VALUE, MYF(0),
              m_part_info->part_expr->val_int());
@@ -2991,6 +5020,7 @@
 bool ha_partition::get_error_message(int error, String *buf)
 {
   DBUG_ENTER("ha_partition::get_error_message");
+
   /* Should probably look for my own errors first */
   /* monty: needs to be called for the last used partition ! */
   DBUG_RETURN(m_file[0]->get_error_message(error, buf));
@@ -3015,7 +5045,8 @@
 { return ha_partition_ext; }
 
 
-uint ha_partition::min_of_the_max_uint(uint (handler::*operator_func)(void) const) const
+uint ha_partition::min_of_the_max_uint(
+                       uint (handler::*operator_func)(void) const) const
 {
   handler **file;
   uint min_of_the_max= ((*m_file)->*operator_func)();
@@ -3063,6 +5094,7 @@
 {
   handler **file;
   uint max= (*m_file)->extra_rec_buf_length();
+
   for (file= m_file, file++; *file; file++)
     if (max < (*file)->extra_rec_buf_length())
       max= (*file)->extra_rec_buf_length();
@@ -3074,6 +5106,7 @@
 {
   handler **file;
   uint max= (*m_file)->min_record_length(options);
+
   for (file= m_file, file++; *file; file++)
     if (max < (*file)->min_record_length(options))
       max= (*file)->min_record_length(options);
@@ -3085,10 +5118,23 @@
                 MODULE compare records
 ****************************************************************************/
 /*
-  We get two references and need to check if those records are the same.
-  If they belong to different partitions we decide that they are not
-  the same record. Otherwise we use the particular handler to decide if
-  they are the same. Sort in partition id order if not equal.
+  Compare two positions
+
+  SYNOPSIS
+    cmp_ref()
+    ref1                   First position
+    ref2                   Second position
+
+  RETURN VALUE
+    <0                     ref1 < ref2
+    0                      Equal
+    >0                     ref1 > ref2
+
+  DESCRIPTION
+    We get two references and need to check if those records are the same.
+    If they belong to different partitions we decide that they are not
+    the same record. Otherwise we use the particular handler to decide if
+    they are the same. Sort in partition id order if not equal.
 */
 
 int ha_partition::cmp_ref(const byte *ref1, const byte *ref2)
@@ -3097,9 +5143,10 @@
   my_ptrdiff_t diff1, diff2;
   handler *file;
   DBUG_ENTER("ha_partition::cmp_ref");
+
   if ((ref1[0] == ref2[0]) && (ref1[1] == ref2[1]))
   {
-    part_id= get_part_id_from_pos(ref1);
+    part_id= uint2korr(ref1);
     file= m_file[part_id];
     DBUG_ASSERT(part_id < m_tot_parts);
     DBUG_RETURN(file->cmp_ref((ref1 + PARTITION_BYTES_IN_POS),
@@ -3130,6 +5177,7 @@
 void ha_partition::restore_auto_increment()
 {
   DBUG_ENTER("ha_partition::restore_auto_increment");
+
   DBUG_VOID_RETURN;
 }
 
@@ -3144,6 +5192,7 @@
 ulonglong ha_partition::get_auto_increment()
 {
   DBUG_ENTER("ha_partition::get_auto_increment");
+
   DBUG_RETURN(m_file[0]->get_auto_increment());
 }
 
@@ -3179,6 +5228,7 @@
 /*
   Function we use in the creation of our hash to get key.
 */
+
 static byte *partition_get_key(PARTITION_SHARE *share, uint *length,
 			       my_bool not_used __attribute__ ((unused)))
 {
@@ -3192,7 +5242,6 @@
   Well, you have pieces that are used for locking, and they are needed to
   function.
 */
-
 
 static PARTITION_SHARE *get_share(const char *table_name, TABLE *table)
 {

--- 1.19/sql/sql_partition.cc	2006-01-06 12:40:23 -06:00
+++ 1.20/sql/sql_partition.cc	2006-01-18 09:40:19 -06:00
@@ -43,13 +43,18 @@
 /*
   Partition related functions declarations and some static constants;
 */
-static const char *hash_str= "HASH";
-static const char *range_str= "RANGE";
-static const char *list_str= "LIST";
+const LEX_STRING partition_keywords[]=
+{
+  { (char *) STRING_WITH_LEN("HASH") },
+  { (char *) STRING_WITH_LEN("RANGE") },
+  { (char *) STRING_WITH_LEN("LIST") }, 
+  { (char *) STRING_WITH_LEN("KEY") },
+  { (char *) STRING_WITH_LEN("MAXVALUE") },
+  { (char *) STRING_WITH_LEN("LINEAR ") }
+};
 static const char *part_str= "PARTITION";
 static const char *sub_str= "SUB";
 static const char *by_str= "BY";
-static const char *key_str= "KEY";
 static const char *space_str= " ";
 static const char *equal_str= "=";
 static const char *end_paren_str= ")";
@@ -57,34 +62,48 @@
 static const char *comma_str= ",";
 static char buff[22];
 
-bool get_partition_id_list(partition_info *part_info,
-                           uint32 *part_id);
-bool get_partition_id_range(partition_info *part_info,
-                            uint32 *part_id);
-bool get_partition_id_hash_nosub(partition_info *part_info,
-                                 uint32 *part_id);
-bool get_partition_id_key_nosub(partition_info *part_info,
-                                uint32 *part_id);
-bool get_partition_id_linear_hash_nosub(partition_info *part_info,
-                                        uint32 *part_id);
-bool get_partition_id_linear_key_nosub(partition_info *part_info,
-                                       uint32 *part_id);
-bool get_partition_id_range_sub_hash(partition_info *part_info,
-                                     uint32 *part_id);
-bool get_partition_id_range_sub_key(partition_info *part_info,
-                                    uint32 *part_id);
-bool get_partition_id_range_sub_linear_hash(partition_info *part_info,
-                                            uint32 *part_id);
-bool get_partition_id_range_sub_linear_key(partition_info *part_info,
-                                           uint32 *part_id);
-bool get_partition_id_list_sub_hash(partition_info *part_info,
-                                    uint32 *part_id);
-bool get_partition_id_list_sub_key(partition_info *part_info,
-                                   uint32 *part_id);
-bool get_partition_id_list_sub_linear_hash(partition_info *part_info,
-                                           uint32 *part_id);
-bool get_partition_id_list_sub_linear_key(partition_info *part_info,
-                                          uint32 *part_id);
+int get_partition_id_list(partition_info *part_info,
+                           uint32 *part_id,
+                           longlong *func_value);
+int get_partition_id_range(partition_info *part_info,
+                            uint32 *part_id,
+                            longlong *func_value);
+int get_partition_id_hash_nosub(partition_info *part_info,
+                                 uint32 *part_id,
+                                 longlong *func_value);
+int get_partition_id_key_nosub(partition_info *part_info,
+                                uint32 *part_id,
+                                longlong *func_value);
+int get_partition_id_linear_hash_nosub(partition_info *part_info,
+                                        uint32 *part_id,
+                                        longlong *func_value);
+int get_partition_id_linear_key_nosub(partition_info *part_info,
+                                       uint32 *part_id,
+                                       longlong *func_value);
+int get_partition_id_range_sub_hash(partition_info *part_info,
+                                     uint32 *part_id,
+                                     longlong *func_value);
+int get_partition_id_range_sub_key(partition_info *part_info,
+                                    uint32 *part_id,
+                                    longlong *func_value);
+int get_partition_id_range_sub_linear_hash(partition_info *part_info,
+                                            uint32 *part_id,
+                                            longlong *func_value);
+int get_partition_id_range_sub_linear_key(partition_info *part_info,
+                                           uint32 *part_id,
+                                           longlong *func_value);
+int get_partition_id_list_sub_hash(partition_info *part_info,
+                                    uint32 *part_id,
+                                    longlong *func_value);
+int get_partition_id_list_sub_key(partition_info *part_info,
+                                   uint32 *part_id,
+                                   longlong *func_value);
+int get_partition_id_list_sub_linear_hash(partition_info *part_info,
+                                           uint32 *part_id,
+                                           longlong *func_value);
+int get_partition_id_list_sub_linear_key(partition_info *part_info,
+                                          uint32 *part_id,
+                                          longlong *func_value);
 uint32 get_partition_id_hash_sub(partition_info *part_info); 
 uint32 get_partition_id_key_sub(partition_info *part_info); 
 uint32 get_partition_id_linear_hash_sub(partition_info *part_info); 
@@ -95,12 +114,15 @@
 /*
   A routine used by the parser to decide whether we are specifying a full
   partitioning or if only partitions to add or to split.
+
   SYNOPSIS
     is_partition_management()
     lex                    Reference to the lex object
+
   RETURN VALUE
     TRUE                   Yes, it is part of a management partition command
     FALSE                  No, not a management partition command
+
   DESCRIPTION
     This needs to be outside of WITH_PARTITION_STORAGE_ENGINE since it is
     used from the sql parser that doesn't have any #ifdef's
@@ -110,31 +132,34 @@
 {
   return (lex->sql_command == SQLCOM_ALTER_TABLE &&
           (lex->alter_info.flags == ALTER_ADD_PARTITION ||
-           lex->alter_info.flags == ALTER_REORGANISE_PARTITION));
+           lex->alter_info.flags == ALTER_REORGANIZE_PARTITION));
 }
 
 #ifdef WITH_PARTITION_STORAGE_ENGINE
 /*
-  A support function to check if a partition name is in a list of strings
+  A support function to check if a name is in a list of strings
+
   SYNOPSIS
-    is_partition_in_list()
-    part_name          String searched for
-    list_part_names    A list of names searched in
+    is_name_in_list()
+    name               String searched for
+    list_names         A list of names searched in
+
   RETURN VALUES
     TRUE               String found
     FALSE              String not found
 */
 
-bool is_partition_in_list(char *part_name,
-                          List<char> list_part_names)
+bool is_name_in_list(char *name,
+                          List<char> list_names)
 {
-  List_iterator<char> part_names_it(list_part_names);
-  uint no_names= list_part_names.elements;
+  List_iterator<char> names_it(list_names);
+  uint no_names= list_names.elements;
   uint i= 0;
+
   do
   {
-    char *list_name= part_names_it++;
-    if (!(my_strcasecmp(system_charset_info, part_name, list_name)))
+    char *list_name= names_it++;
+    if (!(my_strcasecmp(system_charset_info, name, list_name)))
       return TRUE;
   } while (++i < no_names);
   return FALSE;
@@ -144,47 +169,99 @@
 /*
   A support function to check partition names for duplication in a
   partitioned table
+
   SYNOPSIS
-    is_partitions_in_table()
+    are_partitions_in_table()
     new_part_info      New partition info
     old_part_info      Old partition info
+
   RETURN VALUES
     TRUE               Duplicate names found
     FALSE              Duplicate names not found
+
   DESCRIPTION
     Can handle that the new and old parts are the same in which case it
     checks that the list of names in the partitions doesn't contain any
     duplicated names.
 */
 
-bool is_partitions_in_table(partition_info *new_part_info,
-                            partition_info *old_part_info)
+char *are_partitions_in_table(partition_info *new_part_info,
+                              partition_info *old_part_info)
 {
-  uint no_new_parts= new_part_info->partitions.elements, new_count;
-  uint no_old_parts= old_part_info->partitions.elements, old_count;
+  uint no_new_parts= new_part_info->partitions.elements;
+  uint no_old_parts= old_part_info->partitions.elements;
+  uint new_count, old_count;
   List_iterator<partition_element> new_parts_it(new_part_info->partitions);
-  bool same_part_info= (new_part_info == old_part_info);
-  DBUG_ENTER("is_partitions_in_table");
+  bool is_same_part_info= (new_part_info == old_part_info);
+  DBUG_ENTER("are_partitions_in_table");
+  DBUG_PRINT("enter", ("%u", no_new_parts));
 
   new_count= 0;
   do
   {
     List_iterator<partition_element> old_parts_it(old_part_info->partitions);
     char *new_name= (new_parts_it++)->partition_name;
+    DBUG_PRINT("info", ("%s", new_name));
     new_count++;
     old_count= 0;
     do
     {
       char *old_name= (old_parts_it++)->partition_name;
       old_count++;
-      if (same_part_info && old_count == new_count)
+      if (is_same_part_info && old_count == new_count)
         break;
       if (!(my_strcasecmp(system_charset_info, old_name, new_name)))
       {
-        DBUG_RETURN(TRUE);
+        DBUG_PRINT("info", ("old_name = %s, not ok", old_name));
+        DBUG_RETURN(old_name);
       }
     } while (old_count < no_old_parts);
   } while (new_count < no_new_parts);
+  DBUG_RETURN(NULL);
+}
+
+/*
+  Set-up defaults for partitions. 
+
+  SYNOPSIS
+    partition_default_handling()
+    table                         Table object (no_parts is fetched via
+                                  table->file and table->s->normalized_path)
+    part_info                     Partition info to set up
+
+
+  RETURN VALUES
+    TRUE                          Error
+    FALSE                         Success
+*/
+
+bool partition_default_handling(TABLE *table, partition_info *part_info)
+{
+  DBUG_ENTER("partition_default_handling");
+
+  if (part_info->use_default_no_partitions)
+  {
+    if (table->file->get_no_parts(table->s->normalized_path.str,
+                                  &part_info->no_parts))
+    {
+      DBUG_RETURN(TRUE);
+    }
+  }
+  else if (is_sub_partitioned(part_info) &&
+           part_info->use_default_no_subpartitions)
+  {
+    uint no_parts;
+    if (table->file->get_no_parts(table->s->normalized_path.str,
+                                  &no_parts))
+    {
+      DBUG_RETURN(TRUE);
+    }
+    DBUG_ASSERT(part_info->no_parts > 0);
+    part_info->no_subparts= no_parts / part_info->no_parts;
+    DBUG_ASSERT((no_parts % part_info->no_parts) == 0);
+  }
+  set_up_defaults_for_partitioning(part_info, table->file,
+                                   (ulonglong)0, (uint)0);
   DBUG_RETURN(FALSE);
 }
 
@@ -235,7 +312,7 @@
         break;
       if (!(my_strcasecmp(system_charset_info, old_name, new_name)))
       {
-        if (!is_partition_in_list(old_name, list_part_names))
+        if (!is_name_in_list(old_name, list_part_names))
           DBUG_RETURN(TRUE);
       }
     } while (old_count < no_old_parts);
@@ -247,36 +324,35 @@
 /*
   A useful routine used by update_row for partition handlers to calculate
   the partition ids of the old and the new record.
+
   SYNOPSIS
     get_part_for_update()
     old_data                Buffer of old record
     new_data                Buffer of new record
     rec0                    Reference to table->record[0]
     part_info               Reference to partition information
-    part_field_array        A NULL-terminated array of fields for partition
-                            function
-    old_part_id             The returned partition id of old record 
-    new_part_id             The returned partition id of new record 
+    out:old_part_id         The returned partition id of old record 
+    out:new_part_id         The returned partition id of new record
+
   RETURN VALUE
     0                       Success
     > 0                     Error code
-  DESCRIPTION
-    Dependent on whether buf is not record[0] we need to prepare the
-    fields. Then we call the function pointer get_partition_id to
-    calculate the partition ids.
 */
 
 int get_parts_for_update(const byte *old_data, byte *new_data,
                          const byte *rec0, partition_info *part_info,
-                         uint32 *old_part_id, uint32 *new_part_id)
+                         uint32 *old_part_id, uint32 *new_part_id,
+                         longlong *new_func_value)
 {
   Field **part_field_array= part_info->full_part_field_array;
   int error;
+  longlong old_func_value;
   DBUG_ENTER("get_parts_for_update");
-  DBUG_ASSERT(new_data == rec0);
 
+  DBUG_ASSERT(new_data == rec0);
   set_field_ptr(part_field_array, old_data, rec0);
-  error= part_info->get_partition_id(part_info, old_part_id);
+  error= part_info->get_partition_id(part_info, old_part_id,
+                                     &old_func_value);
   set_field_ptr(part_field_array, rec0, old_data);
   if (unlikely(error))                             // Should never happen
   {
@@ -287,7 +363,9 @@
   if (new_data == rec0)
 #endif
   {
-    if (unlikely(error= part_info->get_partition_id(part_info,new_part_id)))
+    if (unlikely(error= part_info->get_partition_id(part_info,
+                                                    new_part_id,
+                                                    new_func_value)))
     {
       DBUG_RETURN(error);
     }
@@ -301,7 +379,8 @@
       condition is false in one test situation before pushing the code.
     */
     set_field_ptr(part_field_array, new_data, rec0);
-    error= part_info->get_partition_id(part_info, new_part_id);
+    error= part_info->get_partition_id(part_info, new_part_id,
+                                       new_func_value);
     set_field_ptr(part_field_array, rec0, new_data);
     if (unlikely(error))
     {
@@ -316,17 +395,18 @@
 /*
   A useful routine used by delete_row for partition handlers to calculate
   the partition id.
+
   SYNOPSIS
     get_part_for_delete()
     buf                     Buffer of old record
     rec0                    Reference to table->record[0]
     part_info               Reference to partition information
-    part_field_array        A NULL-terminated array of fields for partition
-                            function
-    part_id                 The returned partition id to delete from
+    out:part_id             The returned partition id to delete from
+
   RETURN VALUE
     0                       Success
     > 0                     Error code
+
   DESCRIPTION
     Dependent on whether buf is not record[0] we need to prepare the
     fields. Then we call the function pointer get_partition_id to
@@ -337,11 +417,13 @@
                         partition_info *part_info, uint32 *part_id)
 {
   int error;
+  longlong func_value;
   DBUG_ENTER("get_part_for_delete");
 
   if (likely(buf == rec0))
   {
-    if (unlikely((error= part_info->get_partition_id(part_info, part_id))))
+    if (unlikely((error= part_info->get_partition_id(part_info, part_id,
+                                                     &func_value))))
     {
       DBUG_RETURN(error);
     }
@@ -351,7 +433,7 @@
   {
     Field **part_field_array= part_info->full_part_field_array;
     set_field_ptr(part_field_array, buf, rec0);
-    error= part_info->get_partition_id(part_info, part_id);
+    error= part_info->get_partition_id(part_info, part_id, &func_value);
     set_field_ptr(part_field_array, rec0, buf);
     if (unlikely(error))
     {
@@ -368,12 +450,15 @@
   check what partition a certain value belongs to. At the same time it does
   also check that the range constants are defined in increasing order and
   that the expressions are constant integer expressions.
+
   SYNOPSIS
     check_range_constants()
-      part_info
+    part_info             Partition info
+
   RETURN VALUE
     TRUE                An error occurred during creation of range constants
     FALSE               Successful creation of range constant mapping
+
   DESCRIPTION
     This routine is called from check_partition_info to get a quick error
     before we came too far into the CREATE TABLE process. It is also called
@@ -384,8 +469,10 @@
 static bool check_range_constants(partition_info *part_info)
 {
   partition_element* part_def;
-  longlong current_largest_int= LONGLONG_MIN, part_range_value_int;
-  uint no_parts= part_info->no_parts, i;
+  longlong current_largest_int= LONGLONG_MIN;
+  longlong part_range_value_int;
+  uint no_parts= part_info->no_parts;
+  uint i;
   List_iterator<partition_element> it(part_info->partitions);
   bool result= TRUE;
   DBUG_ENTER("check_range_constants");
@@ -396,7 +483,7 @@
                       (longlong*)sql_alloc(no_parts * sizeof(longlong));
   if (unlikely(part_info->range_int_array == NULL))
   {
-    my_error(ER_OUTOFMEMORY, MYF(0), no_parts*sizeof(longlong));
+    mem_alloc_error(no_parts * sizeof(longlong));
     goto end;
   }
   i= 0;
@@ -427,10 +514,12 @@
 /*
   A support routine for check_list_constants used by qsort to sort the
   constant list expressions.
+
   SYNOPSIS
     list_part_cmp()
       a                First list constant to compare with
       b                Second list constant to compare with
+
   RETURN VALUE
     +1                 a > b
     0                  a  == b
@@ -439,9 +528,8 @@
 
 static int list_part_cmp(const void* a, const void* b)
 {
-  longlong a1, b1;
-  a1= ((LIST_PART_ENTRY*)a)->list_value;
-  b1= ((LIST_PART_ENTRY*)b)->list_value;
+  longlong a1= ((LIST_PART_ENTRY*)a)->list_value;
+  longlong b1= ((LIST_PART_ENTRY*)b)->list_value;
   if (a1 < b1)
     return -1;
   else if (a1 > b1)
@@ -456,12 +544,15 @@
   check what partition a certain value belongs to. At the same time it does
   also check that there are no duplicates among the list constants and that
   that the list expressions are constant integer expressions.
+
   SYNOPSIS
     check_list_constants()
-      part_info
+    part_info             Partition info
+
   RETURN VALUE
     TRUE                  An error occurred during creation of list constants
     FALSE                 Successful creation of list constant mapping
+
   DESCRIPTION
     This routine is called from check_partition_info to get a quick error
     before we came too far into the CREATE TABLE process. It is also called
@@ -471,9 +562,12 @@
 
 static bool check_list_constants(partition_info *part_info)
 {
-  uint i, no_list_values= 0, no_parts, list_index= 0;
+  uint i, no_parts;
+  uint no_list_values= 0;
+  uint list_index= 0;
   longlong *list_value;
-  bool not_first, result= TRUE;
+  bool not_first;
+  bool result= TRUE;
   longlong curr_value, prev_value;
   partition_element* part_def;
   List_iterator<partition_element> list_func_it(part_info->partitions);
@@ -511,7 +605,7 @@
       (LIST_PART_ENTRY*)sql_alloc(no_list_values*sizeof(LIST_PART_ENTRY));
   if (unlikely(part_info->list_array == NULL))
   {
-    my_error(ER_OUTOFMEMORY, MYF(0), no_list_values*sizeof(LIST_PART_ENTRY));
+    mem_alloc_error(no_list_values * sizeof(LIST_PART_ENTRY));
     goto end;
   }
 
@@ -555,12 +649,16 @@
 /*
   Create a memory area where default partition names are stored and fill it
   up with the names.
+
   SYNOPSIS
     create_default_partition_names()
     no_parts                        Number of partitions
+    start_no                        Starting partition number
     subpart                         Is it subpartitions
+
   RETURN VALUE
     A pointer to the memory area of the default partition names
+
   DESCRIPTION
     A support routine for the partition code where default values are
     generated.
@@ -571,17 +669,18 @@
 
 static char *create_default_partition_names(const char *partition_name,
                                             uint no_parts, uint start_no,
-                                            bool subpart)
+                                            bool is_subpart)
 {
   char *ptr= sql_calloc(no_parts*MAX_PART_NAME_SIZE);
   char *move_ptr= ptr;
   uint i= 0;
   DBUG_ENTER("create_default_partition_names");
+
   if (likely(ptr != 0))
   {
     do
     {
-      if (subpart)
+      if (is_subpart)
         my_sprintf(move_ptr, (move_ptr,"%s_sp%u", partition_name, 
                    (start_no + i)));
       else
@@ -591,7 +690,7 @@
   }
   else
   {
-    my_error(ER_OUTOFMEMORY, MYF(0), no_parts*MAX_PART_NAME_SIZE);
+    mem_alloc_error(no_parts*MAX_PART_NAME_SIZE);
   }
   DBUG_RETURN(ptr);
 }
@@ -601,14 +700,18 @@
   Set up all the default partitions not set-up by the user in the SQL
   statement. Also perform a number of checks that the user hasn't tried
   to use default values where no defaults exists.
+
   SYNOPSIS
     set_up_default_partitions()
     part_info           The reference to all partition information
     file                A reference to a handler of the table
     max_rows            Maximum number of rows stored in the table
+    start_no            Starting partition number
+
   RETURN VALUE
     TRUE                Error, attempted default values not possible
     FALSE               Ok, default partitions set-up
+
   DESCRIPTION
     The routine uses the underlying handler of the partitioning to define
     the default number of partitions. For some handlers this requires
@@ -631,16 +734,15 @@
   {
     const char *error_string;
     if (part_info->part_type == RANGE_PARTITION)
-      error_string= range_str;
+      error_string= partition_keywords[PKW_RANGE].str;
     else
-      error_string= list_str;
+      error_string= partition_keywords[PKW_LIST].str;
     my_error(ER_PARTITIONS_MUST_BE_DEFINED_ERROR, MYF(0), error_string);
     goto end;
   }
   if (part_info->no_parts == 0)
     part_info->no_parts= file->get_default_no_partitions(max_rows);
   no_parts= part_info->no_parts;
-  part_info->use_default_partitions= FALSE;
   if (unlikely(no_parts > MAX_PARTITIONS))
   {
     my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
@@ -654,16 +756,16 @@
   do
   {
     partition_element *part_elem= new partition_element();
-    if (likely(part_elem != 0))
+    if (likely(part_elem != 0 &&
+               (!part_info->partitions.push_back(part_elem))))
     {
-      part_elem->engine_type= NULL;
+      part_elem->engine_type= part_info->default_engine_type;
       part_elem->partition_name= default_name;
       default_name+=MAX_PART_NAME_SIZE;
-      part_info->partitions.push_back(part_elem);
     }
     else
     {
-      my_error(ER_OUTOFMEMORY, MYF(0), sizeof(partition_element));
+      mem_alloc_error(sizeof(partition_element));
       goto end;
     }
   } while (++i < no_parts);
@@ -677,14 +779,17 @@
   Set up all the default subpartitions not set-up by the user in the SQL
   statement. Also perform a number of checks that the default partitioning
   becomes an allowed partitioning scheme.
+
   SYNOPSIS
     set_up_default_subpartitions()
     part_info           The reference to all partition information
     file                A reference to a handler of the table
     max_rows            Maximum number of rows stored in the table
+
   RETURN VALUE
     TRUE                Error, attempted default values not possible
     FALSE               Ok, default partitions set-up
+
   DESCRIPTION
     The routine uses the underlying handler of the partitioning to define
     the default number of partitions. For some handlers this requires
@@ -708,7 +813,6 @@
     part_info->no_subparts= file->get_default_no_partitions(max_rows);
   no_parts= part_info->no_parts;
   no_subparts= part_info->no_subparts;
-  part_info->use_default_subpartitions= FALSE;
   if (unlikely((no_parts * no_subparts) > MAX_PARTITIONS))
   {
     my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
@@ -726,16 +830,16 @@
     do
     {
       partition_element *subpart_elem= new partition_element();
-      if (likely(subpart_elem != 0))
+      if (likely(subpart_elem != 0 &&
+          (!part_elem->subpartitions.push_back(subpart_elem))))
       {
-        subpart_elem->engine_type= NULL;
+        subpart_elem->engine_type= part_info->default_engine_type;
         subpart_elem->partition_name= name_ptr;
         name_ptr+= MAX_PART_NAME_SIZE;
-        part_elem->subpartitions.push_back(subpart_elem);
       }
       else
       {
-        my_error(ER_OUTOFMEMORY, MYF(0), sizeof(partition_element));
+        mem_alloc_error(sizeof(partition_element));
         goto end;
       }
     } while (++j < no_subparts);
@@ -747,18 +851,22 @@
 
 
 /*
-  Set up defaults for partition or subpartition (cannot set-up for both,
-  this will return an error.
+  Support routine for check_partition_info
+
   SYNOPSIS
     set_up_defaults_for_partitioning()
     part_info           The reference to all partition information
     file                A reference to a handler of the table
     max_rows            Maximum number of rows stored in the table
+    start_no            Starting partition number
+
   RETURN VALUE
     TRUE                Error, attempted default values not possible
     FALSE               Ok, default partitions set-up
+
   DESCRIPTION
-    Support routine for check_partition_info
+    Set up defaults for partition or subpartition (cannot set up both;
+    attempting to do so will return an error).
 */
 
 bool set_up_defaults_for_partitioning(partition_info *part_info,
@@ -767,11 +875,15 @@
 {
   DBUG_ENTER("set_up_defaults_for_partitioning");
 
-  if (part_info->use_default_partitions)
-    DBUG_RETURN(set_up_default_partitions(part_info, file, max_rows,
-                                          start_no));
-  if (is_sub_partitioned(part_info) && part_info->use_default_subpartitions)
-    DBUG_RETURN(set_up_default_subpartitions(part_info, file, max_rows));
+  if (!part_info->default_partitions_setup)
+  {
+    part_info->default_partitions_setup= TRUE;
+    if (part_info->use_default_partitions)
+      DBUG_RETURN(set_up_default_partitions(part_info, file, max_rows,
+                                            start_no));
+    if (is_sub_partitioned(part_info) && part_info->use_default_subpartitions)
+      DBUG_RETURN(set_up_default_subpartitions(part_info, file, max_rows));
+  }
   DBUG_RETURN(FALSE);
 }
 
@@ -779,21 +891,22 @@
 /*
   Check that all partitions use the same storage engine.
   This is currently a limitation in this version.
+
   SYNOPSIS
     check_engine_mix()
     engine_array           An array of engine identifiers
     no_parts               Total number of partitions
+
   RETURN VALUE
     TRUE                   Error, mixed engines
     FALSE                  Ok, no mixed engines
+  DESCRIPTION
+    Current check verifies only that all handlers are the same.
+    Later this check will be more sophisticated.
 */
 
 static bool check_engine_mix(handlerton **engine_array, uint no_parts)
 {
-  /*
-    Current check verifies only that all handlers are the same.
-    Later this check will be more sophisticated.
-  */
   uint i= 0;
   bool result= FALSE;
   DBUG_ENTER("check_engine_mix");
@@ -811,31 +924,35 @@
 
 
 /*
-  We will check that the partition info requested is possible to set-up in
-  this version. This routine is an extension of the parser one could say.
-  If defaults were used we will generate default data structures for all
-  partitions.
+  This code is used early in the CREATE TABLE and ALTER TABLE process.
+
   SYNOPSIS
     check_partition_info()
     part_info           The reference to all partition information
-    db_type             Default storage engine if no engine specified per
-                        partition.
     file                A reference to a handler of the table
     max_rows            Maximum number of rows stored in the table
+    engine_type         Return value for used engine in partitions
+
   RETURN VALUE
     TRUE                 Error, something went wrong
     FALSE                Ok, full partition data structures are now generated
+
   DESCRIPTION
-    This code is used early in the CREATE TABLE and ALTER TABLE process.
+    We will check that the partition info requested is possible to set-up in
+    this version. This routine is an extension of the parser one could say.
+    If defaults were used we will generate default data structures for all
+    partitions.
+
 */
 
-bool check_partition_info(partition_info *part_info,handlerton *eng_type,
+bool check_partition_info(partition_info *part_info,handlerton **eng_type,
                           handler *file, ulonglong max_rows)
 {
   handlerton **engine_array= NULL;
-  uint part_count= 0, i, no_parts, tot_partitions;
+  uint part_count= 0;
+  uint i, no_parts, tot_partitions;
   bool result= TRUE;
-  List_iterator<partition_element> part_it(part_info->partitions);
+  char *same_name;
   DBUG_ENTER("check_partition_info");
 
   if (unlikely(is_sub_partitioned(part_info) &&
@@ -855,9 +972,10 @@
     my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
     goto end;
   }
-  if (unlikely(part_info->has_duplicate_names()))
+  if (((same_name= are_partitions_in_table(part_info,
+                                           part_info))))
   {
-    my_error(ER_SAME_NAME_PARTITION, MYF(0));
+    my_error(ER_SAME_NAME_PARTITION, MYF(0), same_name);
     goto end;
   }
   engine_array= (handlerton**)my_malloc(tot_partitions * sizeof(handlerton *), 
@@ -866,36 +984,44 @@
     goto end;
   i= 0;
   no_parts= part_info->no_parts;
-  do
   {
-    partition_element *part_elem= part_it++;
-    if (!is_sub_partitioned(part_info))
-    {
-      if (part_elem->engine_type == NULL)
-        part_elem->engine_type= eng_type;
-      DBUG_PRINT("info", ("engine = %s", part_elem->engine_type->name));
-      engine_array[part_count++]= part_elem->engine_type;
-    }
-    else
+    List_iterator<partition_element> part_it(part_info->partitions);
+    do
     {
-      uint j= 0, no_subparts= part_info->no_subparts;;
-      List_iterator<partition_element> sub_it(part_elem->subpartitions);
-      do
+      partition_element *part_elem= part_it++;
+      if (!is_sub_partitioned(part_info))
       {
-        part_elem= sub_it++;
         if (part_elem->engine_type == NULL)
-          part_elem->engine_type= eng_type;
-        DBUG_PRINT("info", ("engine = %s", part_elem->engine_type->name));
+          part_elem->engine_type= part_info->default_engine_type;
+        DBUG_PRINT("info", ("engine = %d",
+                   ha_legacy_type(part_elem->engine_type)));
         engine_array[part_count++]= part_elem->engine_type;
-      } while (++j < no_subparts);
-    }
-  } while (++i < part_info->no_parts);
+      }
+      else
+      {
+        uint j= 0, no_subparts= part_info->no_subparts;;
+        List_iterator<partition_element> sub_it(part_elem->subpartitions);
+        do
+        {
+          part_elem= sub_it++;
+          if (part_elem->engine_type == NULL)
+            part_elem->engine_type= part_info->default_engine_type;
+          DBUG_PRINT("info", ("engine = %u",
+                     ha_legacy_type(part_elem->engine_type)));
+          engine_array[part_count++]= part_elem->engine_type;
+        } while (++j < no_subparts);
+      }
+    } while (++i < part_info->no_parts);
+  }
   if (unlikely(check_engine_mix(engine_array, part_count)))
   {
     my_error(ER_MIX_HANDLER_ERROR, MYF(0));
     goto end;
   }
 
+  if (eng_type)
+    *eng_type= (handlerton*)engine_array[0];
+
   /*
     We need to check all constant expressions that they are of the correct
     type and that they are increasing for ranges and not overlapping for
@@ -915,51 +1041,54 @@
 
 
 /*
-  A great number of functions below here is part of the fix_partition_func
-  method. It is used to set up the partition structures for execution from
-  openfrm. It is called at the end of the openfrm when the table struct has
-  been set-up apart from the partition information.
-  It involves:
-  1) Setting arrays of fields for the partition functions.
-  2) Setting up binary search array for LIST partitioning
-  3) Setting up array for binary search for RANGE partitioning
-  4) Setting up key_map's to assist in quick evaluation whether one
-     can deduce anything from a given index of what partition to use
-  5) Checking whether a set of partitions can be derived from a range on
-     a field in the partition function.
-  As part of doing this there is also a great number of error controls.
-  This is actually the place where most of the things are checked for
-  partition information when creating a table.
-  Things that are checked includes
-  1) No NULLable fields in partition function
-  2) All fields of partition function in Primary keys and unique indexes
-     (if not supported)
-  3) No fields in partition function that are BLOB's or VARCHAR with a
-     collation other than the binary collation.
-
-
+  This method is used to set-up both partition and subpartitioning
+  field array and used for all types of partitioning.
+  It is part of the logic around fix_partition_func.
 
-  Create an array of partition fields (NULL terminated). Before this method
-  is called fix_fields or find_table_in_sef has been called to set
-  GET_FIXED_FIELDS_FLAG on all fields that are part of the partition
-  function.
   SYNOPSIS
     set_up_field_array()
     table                TABLE object for which partition fields are set-up
     sub_part             Is the table subpartitioned as well
+
   RETURN VALUE
     TRUE                 Error, some field didn't meet requirements
     FALSE                Ok, partition field array set-up
+
   DESCRIPTION
-    This method is used to set-up both partition and subpartitioning
-    field array and used for all types of partitioning.
-    It is part of the logic around fix_partition_func.
+
+    A great number of functions below here is part of the fix_partition_func
+    method. It is used to set up the partition structures for execution from
+    openfrm. It is called at the end of the openfrm when the table struct has
+    been set-up apart from the partition information.
+    It involves:
+    1) Setting arrays of fields for the partition functions.
+    2) Setting up binary search array for LIST partitioning
+    3) Setting up array for binary search for RANGE partitioning
+    4) Setting up key_map's to assist in quick evaluation whether one
+       can deduce anything from a given index of what partition to use
+    5) Checking whether a set of partitions can be derived from a range on
+       a field in the partition function.
+    As part of doing this there is also a great number of error controls.
+    This is actually the place where most of the things are checked for
+    partition information when creating a table.
+    Things that are checked includes
+    1) All fields of partition function in Primary keys and unique indexes
+       (if not supported)
+
+
+    Create an array of partition fields (NULL terminated). Before this method
+    is called fix_fields or find_table_in_sef has been called to set
+    GET_FIXED_FIELDS_FLAG on all fields that are part of the partition
+    function.
 */
+
 static bool set_up_field_array(TABLE *table,
-                              bool sub_part)
+                              bool is_sub_part)
 {
   Field **ptr, *field, **field_array;
-  uint no_fields= 0, size_field_array, i= 0;
+  uint no_fields= 0;
+  uint size_field_array;
+  uint i= 0;
   partition_info *part_info= table->part_info;
   int result= FALSE;
   DBUG_ENTER("set_up_field_array");
@@ -970,11 +1099,19 @@
     if (field->flags & GET_FIXED_FIELDS_FLAG)
       no_fields++;
   }
+  if (no_fields == 0)
+  {
+    /*
+      We are using hidden key as partitioning field
+    */
+    DBUG_ASSERT(!is_sub_part);
+    DBUG_RETURN(result);
+  }
   size_field_array= (no_fields+1)*sizeof(Field*);
   field_array= (Field**)sql_alloc(size_field_array);
   if (unlikely(!field_array))
   {
-    my_error(ER_OUTOFMEMORY, MYF(0), size_field_array);
+    mem_alloc_error(size_field_array);
     result= TRUE;
   }
   ptr= table->field;
@@ -994,11 +1131,6 @@
           1) Not be a BLOB of any type
             A BLOB takes too long time to evaluate so we don't want it for
             performance reasons.
-          2) Not be a VARCHAR other than VARCHAR with a binary collation
-            A VARCHAR with character sets can have several values being
-            equal with different number of spaces or NULL's. This is not a
-            good ground for a safe and exact partition function. Thus it is
-            not allowed in partition functions.
         */
 
         if (unlikely(field->flags & BLOB_FLAG))
@@ -1006,17 +1138,11 @@
           my_error(ER_BLOB_FIELD_IN_PART_FUNC_ERROR, MYF(0));
           result= TRUE;
         }
-        else if (unlikely((!field->flags & BINARY_FLAG) &&
-                          field->real_type() == MYSQL_TYPE_VARCHAR))
-        {
-          my_error(ER_CHAR_SET_IN_PART_FIELD_ERROR, MYF(0));
-          result= TRUE;
-        }
       }
     }
   }
   field_array[no_fields]= 0;
-  if (!sub_part)
+  if (!is_sub_part)
   {
     part_info->part_field_array= field_array;
     part_info->no_part_fields= no_fields;
@@ -1033,13 +1159,16 @@
 /*
   Create a field array including all fields of both the partitioning and the
   subpartitioning functions.
+
   SYNOPSIS
     create_full_part_field_array()
     table                TABLE object for which partition fields are set-up
     part_info            Reference to partitioning data structure
+
   RETURN VALUE
     TRUE                 Memory allocation of field array failed
     FALSE                Ok
+
   DESCRIPTION
     If there is no subpartitioning then the same array is used as for the
     partitioning. Otherwise a new array is built up using the flag
@@ -1072,7 +1201,7 @@
     field_array= (Field**)sql_alloc(size_field_array);
     if (unlikely(!field_array))
     {
-      my_error(ER_OUTOFMEMORY, MYF(0), size_field_array);
+      mem_alloc_error(size_field_array);
       result= TRUE;
       goto end;
     }
@@ -1093,21 +1222,25 @@
 
 
 /*
-  These support routines is used to set/reset an indicator of all fields
-  in a certain key. It is used in conjunction with another support routine
-  that traverse all fields in the PF to find if all or some fields in the
-  PF is part of the key. This is used to check primary keys and unique
-  keys involve all fields in PF (unless supported) and to derive the
-  key_map's used to quickly decide whether the index can be used to
-  derive which partitions are needed to scan.
-
-
 
   Clear flag GET_FIXED_FIELDS_FLAG in all fields of a key previously set by
   set_indicator_in_key_fields (always used in pairs).
+
   SYNOPSIS
     clear_indicator_in_key_fields()
     key_info                  Reference to find the key fields
+
+  RETURN VALUE
+    NONE
+
+  DESCRIPTION
+    These support routines is used to set/reset an indicator of all fields
+    in a certain key. It is used in conjunction with another support routine
+    that traverse all fields in the PF to find if all or some fields in the
+    PF is part of the key. This is used to check primary keys and unique
+    keys involve all fields in PF (unless supported) and to derive the
+    key_map's used to quickly decide whether the index can be used to
+    derive which partitions are needed to scan.
 */
 
 static void clear_indicator_in_key_fields(KEY *key_info)
@@ -1121,9 +1254,13 @@
 
 /*
   Set flag GET_FIXED_FIELDS_FLAG in all fields of a key.
+
   SYNOPSIS
     set_indicator_in_key_fields
     key_info                  Reference to find the key fields
+
+  RETURN VALUE
+    NONE
 */
 
 static void set_indicator_in_key_fields(KEY *key_info)
@@ -1138,11 +1275,13 @@
 /*
   Check if all or some fields in partition field array is part of a key
   previously used to tag key fields.
+
   SYNOPSIS
     check_fields_in_PF()
     ptr                  Partition field array
-    all_fields           Is all fields of partition field array used in key
-    some_fields          Is some fields of partition field array used in key
+    out:all_fields       Is all fields of partition field array used in key
+    out:some_fields      Is some fields of partition field array used in key
+
   RETURN VALUE
     all_fields, some_fields
 */
@@ -1151,8 +1290,14 @@
                                bool *some_fields)
 {
   DBUG_ENTER("check_fields_in_PF");
+
   *all_fields= TRUE;
   *some_fields= FALSE;
+  if ((!ptr) || !(*ptr))
+  {
+    *all_fields= FALSE;
+    DBUG_VOID_RETURN;
+  }
   do
   {
   /* Check if the field of the PF is part of the current key investigated */
@@ -1168,9 +1313,13 @@
 /*
   Clear flag GET_FIXED_FIELDS_FLAG in all fields of the table.
   This routine is used for error handling purposes.
+
   SYNOPSIS
     clear_field_flag()
     table                TABLE object for which partition fields are set-up
+
+  RETURN VALUE
+    NONE
 */
 
 static void clear_field_flag(TABLE *table)
@@ -1185,35 +1334,42 @@
 
 
 /*
-  This routine sets-up the partition field array for KEY partitioning, it
-  also verifies that all fields in the list of fields is actually a part of
-  the table.
+  find_field_in_table_sef finds the field given its name. All fields get
+  GET_FIXED_FIELDS_FLAG set.
+
   SYNOPSIS
     handle_list_of_fields()
     it                   A list of field names for the partition function
     table                TABLE object for which partition fields are set-up
     part_info            Reference to partitioning data structure
     sub_part             Is the table subpartitioned as well
+
   RETURN VALUE
     TRUE                 Fields in list of fields not part of table
     FALSE                All fields ok and array created
+
   DESCRIPTION
-    find_field_in_table_sef finds the field given its name. All fields get
-    GET_FIXED_FIELDS_FLAG set.
+    This routine sets-up the partition field array for KEY partitioning, it
+    also verifies that all fields in the list of fields is actually a part of
+    the table.
+
 */
 
+
 static bool handle_list_of_fields(List_iterator<char> it,
                                   TABLE *table,
                                   partition_info *part_info,
-                                  bool sub_part)
+                                  bool is_sub_part)
 {
   Field *field;
   bool result;
   char *field_name;
+  bool is_list_empty= TRUE;
   DBUG_ENTER("handle_list_of_fields");
 
   while ((field_name= it++))
   {
+    is_list_empty= FALSE;
     field= find_field_in_table_sef(table, field_name);
     if (likely(field != 0))
       field->flags|= GET_FIXED_FIELDS_FLAG;
@@ -1225,19 +1381,54 @@
       goto end;
     }
   }
-  result= set_up_field_array(table, sub_part);
+  if (is_list_empty)
+  {
+    uint primary_key= table->s->primary_key;
+    if (primary_key != MAX_KEY)
+    {
+      uint no_key_parts= table->key_info[primary_key].key_parts, i;
+      /*
+        In the case of an empty list we use primary key as partition key.
+      */
+      for (i= 0; i < no_key_parts; i++)
+      {
+        Field *field= table->key_info[primary_key].key_part[i].field;
+        field->flags|= GET_FIXED_FIELDS_FLAG;
+      }
+    }
+    else
+    {
+      if (table->s->db_type->partition_flags &&
+          (table->s->db_type->partition_flags() & HA_USE_AUTO_PARTITION) &&
+          (table->s->db_type->partition_flags() & HA_CAN_PARTITION))
+      {
+        /*
+          This engine can handle automatic partitioning and there is no
+          primary key. In this case we rely on that the engine handles
+          partitioning based on a hidden key. Thus we allocate no
+          array for partitioning fields.
+        */
+        DBUG_RETURN(FALSE);
+      }
+      else
+      {
+        my_error(ER_FIELD_NOT_FOUND_PART_ERROR, MYF(0));
+        DBUG_RETURN(TRUE);
+      }
+    }
+  }
+  result= set_up_field_array(table, is_sub_part);
 end:
   DBUG_RETURN(result);
 }
 
 
 /*
-  This function is used to build an array of partition fields for the
-  partitioning function and subpartitioning function. The partitioning
-  function is an item tree that must reference at least one field in the
-  table. This is checked first in the parser that the function doesn't
-  contain non-cacheable parts (like a random function) and by checking
-  here that the function isn't a constant function.
+  The function uses a new feature in fix_fields where the flag 
+  GET_FIXED_FIELDS_FLAG is set for all fields in the item tree.
+  This field must always be reset before returning from the function
+  since it is used for other purposes as well.
+
   SYNOPSIS
     fix_fields_part_func()
     thd                  The thread object
@@ -1245,35 +1436,38 @@
     func_expr            The item tree reference of the partition function
     part_info            Reference to partitioning data structure
     sub_part             Is the table subpartitioned as well
+
   RETURN VALUE
     TRUE                 An error occurred, something was wrong with the
                          partition function.
     FALSE                Ok, a partition field array was created
+
   DESCRIPTION
-    The function uses a new feature in fix_fields where the flag 
-    GET_FIXED_FIELDS_FLAG is set for all fields in the item tree.
-    This field must always be reset before returning from the function
-    since it is used for other purposes as well.
-*/
+    This function is used to build an array of partition fields for the
+    partitioning function and subpartitioning function. The partitioning
+    function is an item tree that must reference at least one field in the
+    table. This is checked first in the parser that the function doesn't
+    contain non-cacheable parts (like a random function) and by checking
+    here that the function isn't a constant function.
 
-static bool fix_fields_part_func(THD *thd, TABLE_LIST *tables,
-                                 Item* func_expr, partition_info *part_info,
-                                 bool sub_part)
-{
-  /*
     Calculate the number of fields in the partition function.
     Use it allocate memory for array of Field pointers.
     Initialise array of field pointers. Use information set when
     calling fix_fields and reset it immediately after.
     The get_fields_in_item_tree activates setting of bit in flags
     on the field object.
-  */
+*/
 
+static bool fix_fields_part_func(THD *thd, TABLE_LIST *tables,
+                                 Item* func_expr, partition_info *part_info,
+                                 bool is_sub_part)
+{
   bool result= TRUE;
   TABLE *table= tables->table;
   TABLE_LIST *save_table_list, *save_first_table, *save_last_table;
   int error;
   Name_resolution_context *context;
+  const char *save_where;
   DBUG_ENTER("fix_fields_part_func");
 
   context= thd->lex->current_context();
@@ -1286,6 +1480,7 @@
   context->first_name_resolution_table= tables;
   context->last_name_resolution_table= NULL;
   func_expr->walk(&Item::change_context_processor, (byte*) context);
+  save_where= thd->where;
   thd->where= "partition function";
   error= func_expr->fix_fields(thd, (Item**)0);
   context->table_list= save_table_list;
@@ -1297,13 +1492,14 @@
     clear_field_flag(table);
     goto end;
   }
+  thd->where= save_where;
   if (unlikely(func_expr->const_item()))
   {
     my_error(ER_CONST_EXPR_IN_PARTITION_FUNC_ERROR, MYF(0));
     clear_field_flag(table);
     goto end;
   }
-  result= set_up_field_array(table, sub_part);
+  result= set_up_field_array(table, is_sub_part);
 end:
   table->get_fields_in_item_tree= FALSE;
   table->map= 0; //Restore old value
@@ -1312,24 +1508,30 @@
 
 
 /*
-  This function verifies that if there is a primary key that it contains
-  all the fields of the partition function.
-  This is a temporary limitation that will hopefully be removed after a
-  while.
+  Check that the primary key contains all partition fields if defined
+
   SYNOPSIS
     check_primary_key()
     table                TABLE object for which partition fields are set-up
+
   RETURN VALUES
     TRUE                 Not all fields in partitioning function was part
                          of primary key
     FALSE                Ok, all fields of partitioning function were part
                          of primary key
+
+  DESCRIPTION
+    This function verifies that if there is a primary key that it contains
+    all the fields of the partition function.
+    This is a temporary limitation that will hopefully be removed after a
+    while.
 */
 
 static bool check_primary_key(TABLE *table)
 {
   uint primary_key= table->s->primary_key;
-  bool all_fields, some_fields, result= FALSE;
+  bool all_fields, some_fields;
+  bool result= FALSE;
   DBUG_ENTER("check_primary_key");
 
   if (primary_key < MAX_KEY)
@@ -1349,25 +1551,33 @@
 
 
 /*
-  This function verifies that if there is a unique index that it contains
-  all the fields of the partition function.
-  This is a temporary limitation that will hopefully be removed after a
-  while.
+  Check that unique keys contains all partition fields
+
   SYNOPSIS
     check_unique_keys()
     table                TABLE object for which partition fields are set-up
+
   RETURN VALUES
     TRUE                 Not all fields in partitioning function was part
                          of all unique keys
     FALSE                Ok, all fields of partitioning function were part
                          of unique keys
+
+  DESCRIPTION
+    This function verifies that if there is a unique index that it contains
+    all the fields of the partition function.
+    This is a temporary limitation that will hopefully be removed after a
+    while.
 */
 
 static bool check_unique_keys(TABLE *table)
 {
-  bool all_fields, some_fields, result= FALSE;
-  uint keys= table->s->keys, i;
+  bool all_fields, some_fields;
+  bool result= FALSE;
+  uint keys= table->s->keys;
+  uint i;
   DBUG_ENTER("check_unique_keys");
+
   for (i= 0; i < keys; i++)
   {
     if (table->key_info[i].flags & HA_NOSAME) //Unique index
@@ -1431,9 +1641,11 @@
   indicating this to notify that we can use also ranges on the field
   of the PF to deduce a set of partitions if the fields of the PF were
   not all fully bound.
+
   SYNOPSIS
     check_range_capable_PF()
     table                TABLE object for which partition fields are set-up
+
   DESCRIPTION
     Support for this is not implemented yet.
 */
@@ -1441,35 +1653,76 @@
 void check_range_capable_PF(TABLE *table)
 {
   DBUG_ENTER("check_range_capable_PF");
+
   DBUG_VOID_RETURN;
 }
 
 
 /*
+  Set up partition bitmap
+
+  SYNOPSIS
+    set_up_partition_bitmap()
+    thd                  Thread object
+    part_info            Reference to partitioning data structure
+
+  RETURN VALUE
+    TRUE                 Memory allocation failure
+    FALSE                Success
+
+  DESCRIPTION
+    Allocate memory for bitmap of the partitioned table
+    and initialise it.
+*/
+
+static bool set_up_partition_bitmap(THD *thd, partition_info *part_info)
+{
+  uint32 *bitmap_buf;
+  uint bitmap_bits= part_info->no_subparts? 
+                     (part_info->no_subparts* part_info->no_parts):
+                      part_info->no_parts;
+  uint bitmap_bytes= bitmap_buffer_size(bitmap_bits);
+  DBUG_ENTER("set_up_partition_bitmap");
+
+  if (!(bitmap_buf= (uint32*)thd->alloc(bitmap_bytes)))
+  {
+    mem_alloc_error(bitmap_bytes);
+    DBUG_RETURN(TRUE);
+  }
+  bitmap_init(&part_info->used_partitions, bitmap_buf, bitmap_bytes*8, FALSE);
+  DBUG_RETURN(FALSE);
+}
+
+
+/*
   Set up partition key maps
+
   SYNOPSIS
     set_up_partition_key_maps()
     table                TABLE object for which partition fields are set-up
     part_info            Reference to partitioning data structure
+
   RETURN VALUES
     None
+
   DESCRIPTION
-  This function sets up a couple of key maps to be able to quickly check
-  if an index ever can be used to deduce the partition fields or even
-  a part of the fields of the  partition function.
-  We set up the following key_map's.
-  PF = Partition Function
-  1) All fields of the PF is set even by equal on the first fields in the
-     key
-  2) All fields of the PF is set if all fields of the key is set
-  3) At least one field in the PF is set if all fields is set
-  4) At least one field in the PF is part of the key
+    This function sets up a couple of key maps to be able to quickly check
+    if an index ever can be used to deduce the partition fields or even
+    a part of the fields of the  partition function.
+    We set up the following key_map's.
+    PF = Partition Function
+    1) All fields of the PF is set even by equal on the first fields in the
+       key
+    2) All fields of the PF is set if all fields of the key is set
+    3) At least one field in the PF is set if all fields is set
+    4) At least one field in the PF is part of the key
 */
 
 static void set_up_partition_key_maps(TABLE *table,
                                       partition_info *part_info)
 {
-  uint keys= table->s->keys, i;
+  uint keys= table->s->keys;
+  uint i;
   bool all_fields, some_fields;
   DBUG_ENTER("set_up_partition_key_maps");
 
@@ -1504,17 +1757,26 @@
 
 
 /*
-  Set-up all function pointers for calculation of partition id,
-  subpartition id and the upper part in subpartitioning. This is to speed up
-  execution of get_partition_id which is executed once every record to be
-  written and deleted and twice for updates.
+  Set up function pointers for partition function
+
   SYNOPSIS
-    set_up_partition_function_pointers()
+    set_up_partition_func_pointers()
     part_info            Reference to partitioning data structure
+
+  RETURN VALUE
+    NONE
+
+  DESCRIPTION
+    Set-up all function pointers for calculation of partition id,
+    subpartition id and the upper part in subpartitioning. This is to speed up
+    execution of get_partition_id which is executed once every record to be
+    written and deleted and twice for updates.
 */
 
 static void set_up_partition_func_pointers(partition_info *part_info)
 {
+  DBUG_ENTER("set_up_partition_func_pointers");
+
   if (is_sub_partitioned(part_info))
   {
     if (part_info->part_type == RANGE_PARTITION)
@@ -1547,7 +1809,7 @@
         }
       }
     }
-    else //LIST Partitioning
+    else /* LIST Partitioning */
     {
       part_info->get_part_partition_id= get_partition_id_list;
       if (part_info->list_of_subpart_fields)
@@ -1578,7 +1840,7 @@
       }
     }
   }
-  else //No subpartitioning
+  else /* No subpartitioning */
   {
     part_info->get_part_partition_id= NULL;
     part_info->get_subpartition_id= NULL;
@@ -1586,7 +1848,7 @@
       part_info->get_partition_id= get_partition_id_range;
     else if (part_info->part_type == LIST_PARTITION)
       part_info->get_partition_id= get_partition_id_list;
-    else //HASH partitioning
+    else /* HASH partitioning */
     {
       if (part_info->list_of_part_fields)
       {
@@ -1604,21 +1866,27 @@
       }
     }
   }
+  DBUG_VOID_RETURN;
 }
           
         
 /*
   For linear hashing we need a mask which is on the form 2**n - 1 where
   2**n >= no_parts. Thus if no_parts is 6 then mask is 2**3 - 1 = 8 - 1 = 7.
+
   SYNOPSIS
     set_linear_hash_mask()
     part_info            Reference to partitioning data structure
     no_parts             Number of parts in linear hash partitioning
+
+  RETURN VALUE
+    NONE
 */
 
 static void set_linear_hash_mask(partition_info *part_info, uint no_parts)
 {
   uint mask;
+
   for (mask= 1; mask < no_parts; mask<<=1)
     ;
   part_info->linear_hash_mask= mask - 1;
@@ -1628,13 +1896,16 @@
 /*
   This function calculates the partition id provided the result of the hash
   function using linear hashing parameters, mask and number of partitions.
+
   SYNOPSIS
     get_part_id_from_linear_hash()
     hash_value          Hash value calculated by HASH function or KEY function
     mask                Mask calculated previously by set_linear_hash_mask
     no_parts            Number of partitions in HASH partitioned part
+
   RETURN VALUE
     part_id             The calculated partition identity (starting at 0)
+
   DESCRIPTION
     The partition is calculated according to the theory of linear hashing.
     See e.g. Linear hashing: a new tool for file and table addressing,
@@ -1646,10 +1917,11 @@
                                            uint no_parts)
 {
   uint32 part_id= (uint32)(hash_value & mask);
+
   if (part_id >= no_parts)
   {
     uint new_mask= ((mask + 1) >> 1) - 1;
-    part_id= hash_value & new_mask;
+    part_id= (uint32)(hash_value & new_mask);
   }
   return part_id;
 }
@@ -1662,10 +1934,12 @@
     thd                  The thread object
     name                 The name of the partitioned table
     table                TABLE object for which partition fields are set-up
+    create_table_ind     Indicator of whether openfrm was called as part of
+                         CREATE or ALTER TABLE
 
   RETURN VALUE
-    TRUE
-    FALSE
+    TRUE                 Error
+    FALSE                Success
 
   DESCRIPTION
     The name parameter contains the full table name and is used to get the
@@ -1680,7 +1954,8 @@
     of an error that is not discovered until here.
 */
 
-bool fix_partition_func(THD *thd, const char *name, TABLE *table)
+bool fix_partition_func(THD *thd, const char* name, TABLE *table,
+                        bool is_create_table_ind)
 {
   bool result= TRUE;
   uint dir_length, home_dir_length;
@@ -1692,6 +1967,10 @@
   ulong save_set_query_id= thd->set_query_id;
   DBUG_ENTER("fix_partition_func");
 
+  if (part_info->fixed)
+  {
+    DBUG_RETURN(FALSE);
+  }
   thd->set_query_id= 0;
   /*
     Set-up the TABLE_LIST object to be a list with a single table
@@ -1711,6 +1990,13 @@
   db_name= &db_name_string[home_dir_length];
   tables.db= db_name;
 
+  if (!is_create_table_ind)
+  {
+    if (partition_default_handling(table, part_info))
+    {
+      DBUG_RETURN(TRUE);
+    }
+  }
   if (is_sub_partitioned(part_info))
   {
     DBUG_ASSERT(part_info->subpart_type == HASH_PARTITION);
@@ -1773,13 +2059,13 @@
     const char *error_str;
     if (part_info->part_type == RANGE_PARTITION)
     {
-      error_str= range_str; 
+      error_str= partition_keywords[PKW_RANGE].str; 
       if (unlikely(check_range_constants(part_info)))
         goto end;
     }
     else if (part_info->part_type == LIST_PARTITION)
     {
-      error_str= list_str; 
+      error_str= partition_keywords[PKW_LIST].str; 
       if (unlikely(check_list_constants(part_info)))
         goto end;
     }
@@ -1807,12 +2093,16 @@
     goto end;
   if (unlikely(check_primary_key(table)))
     goto end;
-  if (unlikely((!table->file->partition_flags() & HA_CAN_PARTITION_UNIQUE) &&
+  if (unlikely((!(table->s->db_type->partition_flags &&
+      (table->s->db_type->partition_flags() & HA_CAN_PARTITION_UNIQUE))) &&
                check_unique_keys(table)))
     goto end;
+  if (unlikely(set_up_partition_bitmap(thd, part_info)))
+    goto end;
   check_range_capable_PF(table);
   set_up_partition_key_maps(table, part_info);
   set_up_partition_func_pointers(part_info);
+  part_info->fixed= TRUE;
   result= FALSE;
 end:
   thd->set_query_id= save_set_query_id;
@@ -1831,6 +2121,7 @@
 static int add_write(File fptr, const char *buf, uint len)
 {
   uint len_written= my_write(fptr, (const byte*)buf, len, MYF(0));
+
   if (likely(len == len_written))
     return 0;
   else
@@ -1875,13 +2166,14 @@
 static int add_part_key_word(File fptr, const char *key_string)
 {
   int err= add_string(fptr, key_string);
+
   err+= add_space(fptr);
   return err + add_begin_parenthesis(fptr);
 }
 
 static int add_hash(File fptr)
 {
-  return add_part_key_word(fptr, hash_str);
+  return add_part_key_word(fptr, partition_keywords[PKW_HASH].str);
 }
 
 static int add_partition(File fptr)
@@ -1893,6 +2185,7 @@
 static int add_subpartition(File fptr)
 {
   int err= add_string(fptr, sub_str);
+
   return err + add_partition(fptr);
 }
 
@@ -1905,6 +2198,7 @@
 static int add_subpartition_by(File fptr)
 {
   int err= add_string(fptr, sub_str);
+
   return err + add_partition_by(fptr);
 }
 
@@ -1912,17 +2206,19 @@
 {
   uint i, no_fields;
   int err;
+
   List_iterator<char> part_it(field_list);
-  err= add_part_key_word(fptr, key_str);
+  err= add_part_key_word(fptr, partition_keywords[PKW_KEY].str);
   no_fields= field_list.elements;
   i= 0;
-  do
+  while (i < no_fields)
   {
     const char *field_str= part_it++;
     err+= add_string(fptr, field_str);
     if (i != (no_fields-1))
       err+= add_comma(fptr);
-  } while (++i < no_fields);
+    i++;
+  }
   return err;
 }
 
@@ -1936,6 +2232,7 @@
                               const char *keystr)
 {
   int err= add_string(fptr, keyword);
+
   err+= add_space(fptr);
   err+= add_equal(fptr);
   err+= add_space(fptr);
@@ -1946,6 +2243,7 @@
 static int add_keyword_int(File fptr, const char *keyword, longlong num)
 {
   int err= add_string(fptr, keyword);
+
   err+= add_space(fptr);
   err+= add_equal(fptr);
   err+= add_space(fptr);
@@ -1956,14 +2254,15 @@
 static int add_engine(File fptr, handlerton *engine_type)
 {
   const char *engine_str= engine_type->name;
+  DBUG_PRINT("info", ("ENGINE = %s", engine_str));
   int err= add_string(fptr, "ENGINE = ");
   return err + add_string(fptr, engine_str);
-  return err;
 }
 
 static int add_partition_options(File fptr, partition_element *p_elem)
 {
   int err= 0;
+
   if (p_elem->tablespace_name)
     err+= add_keyword_string(fptr,"TABLESPACE",p_elem->tablespace_name);
   if (p_elem->nodegroup_id != UNDEF_NODEGROUP)
@@ -1985,6 +2284,7 @@
                          partition_element *p_elem)
 {
   int err= 0;
+
   if (part_info->part_type == RANGE_PARTITION)
   {
     err+= add_string(fptr, "VALUES LESS THAN ");
@@ -1995,7 +2295,7 @@
       err+= add_end_parenthesis(fptr);
     }
     else
-      err+= add_string(fptr, "MAXVALUE");
+      err+= add_string(fptr, partition_keywords[PKW_MAXVALUE].str);
   }
   else if (part_info->part_type == LIST_PARTITION)
   {
@@ -2021,16 +2321,19 @@
   Generate the partition syntax from the partition data structure.
   Useful for support of generating defaults, SHOW CREATE TABLES
   and easy partition management.
+
   SYNOPSIS
     generate_partition_syntax()
     part_info                  The partitioning data structure
     buf_length                 A pointer to the returned buffer length
     use_sql_alloc              Allocate buffer from sql_alloc if true
                                otherwise use my_malloc
-    add_default_info           Add info generated by default
+    write_all                  Write everything, also default values
+
   RETURN VALUES
     NULL error
     buf, buf_length            Buffer and its length
+
   DESCRIPTION
   Here we will generate the full syntax for the given command where all
   defaults have been expanded. By so doing the it is also possible to
@@ -2054,44 +2357,42 @@
 char *generate_partition_syntax(partition_info *part_info,
                                 uint *buf_length,
                                 bool use_sql_alloc,
-				bool add_default_info)
+                                bool write_all)
 {
-  uint i,j, no_parts, no_subparts;
+  uint i,j, tot_no_parts, no_subparts, no_parts;
   partition_element *part_elem;
+  partition_element *save_part_elem= NULL;
   ulonglong buffer_length;
   char path[FN_REFLEN];
   int err= 0;
-  DBUG_ENTER("generate_partition_syntax");
+  List_iterator<partition_element> part_it(part_info->partitions);
+  List_iterator<partition_element> temp_it(part_info->temp_partitions);
   File fptr;
   char *buf= NULL; //Return buffer
-  const char *file_name;
+  uint use_temp= 0;
+  uint no_temp_parts= part_info->temp_partitions.elements;
+  bool write_part_state;
+  DBUG_ENTER("generate_partition_syntax");
 
-  sprintf(path, "%s_%lx_%lx", "part_syntax", current_pid,
-          current_thd->thread_id);
-  fn_format(path,path,mysql_tmpdir,".psy", MY_REPLACE_EXT);
-  file_name= &path[0];
-  DBUG_PRINT("info", ("File name = %s", file_name));
-  if (unlikely(((fptr= my_open(file_name,O_CREAT|O_RDWR, MYF(MY_WME))) == -1)))
+  write_part_state= (part_info->part_state && !part_info->part_state_len);
+  if (unlikely(((fptr= create_temp_file(path,mysql_tmpdir,"psy", 0,0))) < 0))
     DBUG_RETURN(NULL);
-#if defined(MSDOS) || defined(__WIN__) || defined(__EMX__) || defined(OS2)
-#else
-  my_delete(file_name, MYF(0));
+#ifndef __WIN__
+  unlink(path);
 #endif
   err+= add_space(fptr);
   err+= add_partition_by(fptr);
   switch (part_info->part_type)
   {
     case RANGE_PARTITION:
-      add_default_info= TRUE;
-      err+= add_part_key_word(fptr, range_str);
+      err+= add_part_key_word(fptr, partition_keywords[PKW_RANGE].str);
       break;
     case LIST_PARTITION:
-      add_default_info= TRUE;
-      err+= add_part_key_word(fptr, list_str);
+      err+= add_part_key_word(fptr, partition_keywords[PKW_LIST].str);
       break;
     case HASH_PARTITION:
       if (part_info->linear_hash_ind)
-        err+= add_string(fptr, "LINEAR ");
+        err+= add_string(fptr, partition_keywords[PKW_LINEAR].str);
       if (part_info->list_of_part_fields)
         err+= add_key_partition(fptr, part_info->part_field_list);
       else
@@ -2108,6 +2409,13 @@
                          part_info->part_func_len);
   err+= add_end_parenthesis(fptr);
   err+= add_space(fptr);
+  if ((!part_info->use_default_no_partitions) &&
+       part_info->use_default_partitions)
+  {
+    err+= add_string(fptr, "PARTITIONS ");
+    err+= add_int(fptr, part_info->no_parts);
+    err+= add_space(fptr);
+  }
   if (is_sub_partitioned(part_info))
   {
     err+= add_subpartition_by(fptr);
@@ -2121,53 +2429,114 @@
                            part_info->subpart_func_len);
     err+= add_end_parenthesis(fptr);
     err+= add_space(fptr);
+    if ((!part_info->use_default_no_subpartitions) && 
+          part_info->use_default_subpartitions)
+    {
+      err+= add_string(fptr, "SUBPARTITIONS ");
+      err+= add_int(fptr, part_info->no_subparts);
+      err+= add_space(fptr);
+    }
   }
-  if (add_default_info)
-  {
-  err+= add_begin_parenthesis(fptr);
-  List_iterator<partition_element> part_it(part_info->partitions);
   no_parts= part_info->no_parts;
+  tot_no_parts= no_parts + no_temp_parts;
   no_subparts= part_info->no_subparts;
-  i= 0;
-  do
+
+  if (write_all || (!part_info->use_default_partitions))
   {
-    part_elem= part_it++;
-    err+= add_partition(fptr);
-    err+= add_string(fptr, part_elem->partition_name);
-    err+= add_space(fptr);
-    err+= add_partition_values(fptr, part_info, part_elem);
-    if (!is_sub_partitioned(part_info))
-      err+= add_partition_options(fptr, part_elem);
-    if (is_sub_partitioned(part_info))
+    err+= add_begin_parenthesis(fptr);
+    i= 0;
+    do
     {
-      err+= add_space(fptr);
-      err+= add_begin_parenthesis(fptr);
-      List_iterator<partition_element> sub_it(part_elem->subpartitions);
-      j= 0;
-      do
+      /*
+        We need to do some clever list manipulation here since we have two
+        different needs for our list processing and here we take some of the
+        cost of using a simpler list processing for the other parts of the
+        code.
+
+        ALTER TABLE REORGANIZE PARTITIONS has the list of partitions to be
+        the final list as the main list and the reorganised partitions is in
+        the temporary partition list. Thus when finding the first part added
+        we insert the temporary list if there is such a list. If there is no
+        temporary list we are performing an ADD PARTITION.
+      */
+      if (use_temp && use_temp <= no_temp_parts)
+      {
+        part_elem= temp_it++;
+        DBUG_ASSERT(no_temp_parts);
+        no_temp_parts--;
+      }
+      else if (use_temp)
+      {
+        DBUG_ASSERT(no_parts);
+        part_elem= save_part_elem;
+        use_temp= 0;
+        no_parts--;
+      }
+      else
       {
-        part_elem= sub_it++;
-        err+= add_subpartition(fptr);
+        part_elem= part_it++;
+        if ((part_elem->part_state == PART_TO_BE_ADDED ||
+             part_elem->part_state == PART_IS_ADDED) && no_temp_parts)
+        {
+          save_part_elem= part_elem;
+          part_elem= temp_it++;
+          no_temp_parts--;
+          use_temp= 1;
+        }
+        else
+        {
+          DBUG_ASSERT(no_parts);
+          no_parts--;
+        }
+      }
+
+      if (part_elem->part_state != PART_IS_DROPPED)
+      {
+        if (write_part_state)
+        {
+          uint32 part_state_id= part_info->part_state_len;
+          part_info->part_state[part_state_id]= (uchar)part_elem->part_state;
+          part_info->part_state_len= part_state_id+1;
+        }
+        err+= add_partition(fptr);
         err+= add_string(fptr, part_elem->partition_name);
         err+= add_space(fptr);
-        err+= add_partition_options(fptr, part_elem);
-        if (j != (no_subparts-1))
+        err+= add_partition_values(fptr, part_info, part_elem);
+        if (!is_sub_partitioned(part_info))
+          err+= add_partition_options(fptr, part_elem);
+        if (is_sub_partitioned(part_info) &&
+            (write_all || (!part_info->use_default_subpartitions)))
+        {
+          err+= add_space(fptr);
+          err+= add_begin_parenthesis(fptr);
+          List_iterator<partition_element> sub_it(part_elem->subpartitions);
+          j= 0;
+          do
+          {
+            part_elem= sub_it++;
+            err+= add_subpartition(fptr);
+            err+= add_string(fptr, part_elem->partition_name);
+            err+= add_space(fptr);
+            err+= add_partition_options(fptr, part_elem);
+            if (j != (no_subparts-1))
+            {
+              err+= add_comma(fptr);
+              err+= add_space(fptr);
+            }
+            else
+              err+= add_end_parenthesis(fptr);
+          } while (++j < no_subparts);
+        }
+        if (i != (tot_no_parts-1))
         {
           err+= add_comma(fptr);
           err+= add_space(fptr);
         }
-        else
-          err+= add_end_parenthesis(fptr);
-      } while (++j < no_subparts);
-    }
-    if (i != (no_parts-1))
-    {
-      err+= add_comma(fptr);
-      err+= add_space(fptr);
-    }
-    else
-      err+= add_end_parenthesis(fptr);
-  } while (++i < no_parts);
+      }
+      if (i == (tot_no_parts-1))
+        err+= add_end_parenthesis(fptr);
+    } while (++i < tot_no_parts);
+    DBUG_ASSERT(!no_parts && !no_temp_parts);
   }
   if (err)
     goto close_file;
@@ -2195,19 +2564,7 @@
     buf[*buf_length]= 0;
 
 close_file:
-  /*
-    Delete the file before closing to ensure the file doesn't get synched
-    to disk unnecessary. We only used the file system as a dynamic array
-    implementation so we are not really interested in getting the file
-    present on disk.
-    This is not possible on Windows so here it has to be done after closing
-    the file. Also on Unix we delete immediately after opening to ensure no
-    other process can read the information written into the file.
-  */
   my_close(fptr, MYF(0));
-#if defined(MSDOS) || defined(__WIN__) || defined(__EMX__) || defined(OS2)
-  my_delete(file_name, MYF(0));
-#endif
   DBUG_RETURN(buf);
 }
 
@@ -2215,10 +2572,12 @@
 /*
   Check if partition key fields are modified and if it can be handled by the
   underlying storage engine.
+
   SYNOPSIS
     partition_key_modified
     table                TABLE object for which partition fields are set-up
    fields               A list of the fields to be modified
+
   RETURN VALUES
     TRUE                 Need special handling of UPDATE
     FALSE                Normal UPDATE handling is ok
@@ -2230,9 +2589,11 @@
   partition_info *part_info= table->part_info;
   Item_field *item_field;
   DBUG_ENTER("partition_key_modified");
+
   if (!part_info)
     DBUG_RETURN(FALSE);
-  if (table->file->partition_flags() & HA_CAN_UPDATE_PARTITION_KEY)
+  if (table->s->db_type->partition_flags &&
+      (table->s->db_type->partition_flags() & HA_CAN_UPDATE_PARTITION_KEY))
     DBUG_RETURN(FALSE);
   f.rewind();
   while ((item_field=(Item_field*) f++))
@@ -2262,11 +2623,14 @@
 
 /*
   Calculate hash value for KEY partitioning using an array of fields.
+
   SYNOPSIS
     calculate_key_value()
     field_array             An array of the fields in KEY partitioning
+
   RETURN VALUE
     hash_value calculated
+
   DESCRIPTION
     Uses the hash function on the character set of the field. Integer and
     floating point fields use the binary character set by default.
@@ -2276,6 +2640,7 @@
 {
   uint32 hashnr= 0;
   ulong nr2= 4;
+
   do
   {
     Field *field= *field_array;
@@ -2299,6 +2664,7 @@
 /*
   A simple support function to calculate part_id given local part and
   sub part.
+
   SYNOPSIS
     get_part_id_for_sub()
     loc_part_id             Local partition id
@@ -2316,31 +2682,40 @@
 
 /*
   Calculate part_id for (SUB)PARTITION BY HASH
+
   SYNOPSIS
     get_part_id_hash()
     no_parts                 Number of hash partitions
     part_expr                Item tree of hash function
+    out:func_value      Value of hash function
+
   RETURN VALUE
     Calculated partition id
 */
 
 inline
 static uint32 get_part_id_hash(uint no_parts,
-                               Item *part_expr)
+                               Item *part_expr,
+                               longlong *func_value)
 {
   DBUG_ENTER("get_part_id_hash");
-  DBUG_RETURN((uint32)(part_expr->val_int() % no_parts));
+  *func_value= part_expr->val_int();
+  longlong int_hash_id= *func_value % no_parts;
+  DBUG_RETURN(int_hash_id < 0 ? -int_hash_id : int_hash_id);
 }
 
 
 /*
   Calculate part_id for (SUB)PARTITION BY LINEAR HASH
+
   SYNOPSIS
     get_part_id_linear_hash()
     part_info           A reference to the partition_info struct where all the
                         desired information is given
     no_parts            Number of hash partitions
     part_expr           Item tree of hash function
+    out:func_value      Value of hash function
+
   RETURN VALUE
     Calculated partition id
 */
@@ -2348,10 +2723,13 @@
 inline
 static uint32 get_part_id_linear_hash(partition_info *part_info,
                                       uint no_parts,
-                                      Item *part_expr)
+                                      Item *part_expr,
+                                      longlong *func_value)
 {
   DBUG_ENTER("get_part_id_linear_hash");
-  DBUG_RETURN(get_part_id_from_linear_hash(part_expr->val_int(),
+
+  *func_value= part_expr->val_int();
+  DBUG_RETURN(get_part_id_from_linear_hash(*func_value,
                                            part_info->linear_hash_mask,
                                            no_parts));
 }
@@ -2359,31 +2737,37 @@
 
 /*
   Calculate part_id for (SUB)PARTITION BY KEY
+
   SYNOPSIS
     get_part_id_key()
    field_array         Array of fields for PARTITION KEY
     no_parts            Number of KEY partitions
+
   RETURN VALUE
     Calculated partition id
 */
 
 inline
 static uint32 get_part_id_key(Field **field_array,
-                              uint no_parts)
+                              uint no_parts,
+                              longlong *func_value)
 {
   DBUG_ENTER("get_part_id_key");
-  DBUG_RETURN(calculate_key_value(field_array) % no_parts);
+  *func_value= calculate_key_value(field_array);
+  DBUG_RETURN(*func_value % no_parts);
 }
 
 
 /*
   Calculate part_id for (SUB)PARTITION BY LINEAR KEY
+
   SYNOPSIS
     get_part_id_linear_key()
     part_info           A reference to the partition_info struct where all the
                         desired information is given
    field_array         Array of fields for PARTITION KEY
     no_parts            Number of KEY partitions
+
   RETURN VALUE
     Calculated partition id
 */
@@ -2391,10 +2775,13 @@
 inline
 static uint32 get_part_id_linear_key(partition_info *part_info,
                                      Field **field_array,
-                                     uint no_parts)
+                                     uint no_parts,
+                                     longlong *func_value)
 {
   DBUG_ENTER("get_partition_id_linear_key");
-  DBUG_RETURN(get_part_id_from_linear_hash(calculate_key_value(field_array),
+
+  *func_value= calculate_key_value(field_array);
+  DBUG_RETURN(get_part_id_from_linear_hash(*func_value,
                                            part_info->linear_hash_mask,
                                            no_parts));
 }
@@ -2403,15 +2790,18 @@
   This function is used to calculate the partition id where all partition
   fields have been prepared to point to a record where the partition field
   values are bound.
+
   SYNOPSIS
     get_partition_id()
     part_info           A reference to the partition_info struct where all the
                         desired information is given
-    part_id             The partition id is returned through this pointer
+    out:part_id         The partition id is returned through this pointer
+
   RETURN VALUE
     part_id
     return TRUE means that the fields of the partition function didn't fit
     into any partition and thus the values of the PF-fields are not allowed.
+
   DESCRIPTION
     A routine used from write_row, update_row and delete_row from any
     handler supporting partitioning. It is also a support routine for
@@ -2441,15 +2831,18 @@
   This function is used to calculate the main partition to use in the case of
   subpartitioning and we don't know enough to get the partition identity in
   total.
+
   SYNOPSIS
     get_part_partition_id()
     part_info           A reference to the partition_info struct where all the
                         desired information is given
-    part_id             The partition id is returned through this pointer
+    out:part_id         The partition id is returned through this pointer
+
   RETURN VALUE
     part_id
     return TRUE means that the fields of the partition function didn't fit
     into any partition and thus the values of the PF-fields are not allowed.
+
   DESCRIPTION
     
     It is actually 6 different variants of this function which are called
@@ -2464,15 +2857,19 @@
 */
 
 
-bool get_partition_id_list(partition_info *part_info,
-                           uint32 *part_id)
+int get_partition_id_list(partition_info *part_info,
+                           uint32 *part_id,
+                           longlong *func_value)
 {
-  DBUG_ENTER("get_partition_id_list");
   LIST_PART_ENTRY *list_array= part_info->list_array;
-  uint list_index;
+  int list_index;
   longlong list_value;
-  uint min_list_index= 0, max_list_index= part_info->no_list_values - 1;
+  int min_list_index= 0;
+  int max_list_index= part_info->no_list_values - 1;
   longlong part_func_value= part_info->part_expr->val_int();
+  DBUG_ENTER("get_partition_id_list");
+
+  *func_value= part_func_value;
   while (max_list_index >= min_list_index)
   {
     list_index= (max_list_index + min_list_index) >> 1;
@@ -2488,12 +2885,12 @@
     else
     {
       *part_id= (uint32)list_array[list_index].partition_id;
-      DBUG_RETURN(FALSE);
+      DBUG_RETURN(0);
     }
   }
 notfound:
   *part_id= 0;
-  DBUG_RETURN(TRUE);
+  DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);
 }
 
 
@@ -2570,14 +2967,18 @@
 }
 
 
-bool get_partition_id_range(partition_info *part_info,
-                            uint32 *part_id)
+int get_partition_id_range(partition_info *part_info,
+                            uint32 *part_id,
+                            longlong *func_value)
 {
-  DBUG_ENTER("get_partition_id_int_range");
   longlong *range_array= part_info->range_int_array;
   uint max_partition= part_info->no_parts - 1;
-  uint min_part_id= 0, max_part_id= max_partition, loc_part_id;
+  uint min_part_id= 0;
+  uint max_part_id= max_partition;
+  uint loc_part_id;
   longlong part_func_value= part_info->part_expr->val_int();
+  DBUG_ENTER("get_partition_id_int_range");
+
   while (max_part_id > min_part_id)
   {
     loc_part_id= (max_part_id + min_part_id + 1) >> 1;
@@ -2591,11 +2992,12 @@
     if (loc_part_id != max_partition)
       loc_part_id++;
   *part_id= (uint32)loc_part_id;
+  *func_value= part_func_value;
   if (loc_part_id == max_partition)
     if (range_array[loc_part_id] != LONGLONG_MAX)
       if (part_func_value >= range_array[loc_part_id])
-        DBUG_RETURN(TRUE);
-  DBUG_RETURN(FALSE);
+        DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);
+  DBUG_RETURN(0);
 }
 
 
@@ -2681,191 +3083,246 @@
 }
 
 
-bool get_partition_id_hash_nosub(partition_info *part_info,
-                                 uint32 *part_id)
+int get_partition_id_hash_nosub(partition_info *part_info,
+                                 uint32 *part_id,
+                                 longlong *func_value)
 {
-  *part_id= get_part_id_hash(part_info->no_parts, part_info->part_expr);
-  return FALSE;
+  *part_id= get_part_id_hash(part_info->no_parts, part_info->part_expr,
+                             func_value);
+  return 0;
 }
 
 
-bool get_partition_id_linear_hash_nosub(partition_info *part_info,
-                                        uint32 *part_id)
+int get_partition_id_linear_hash_nosub(partition_info *part_info,
+                                        uint32 *part_id,
+                                        longlong *func_value)
 {
   *part_id= get_part_id_linear_hash(part_info, part_info->no_parts,
-                                    part_info->part_expr);
-  return FALSE;
+                                    part_info->part_expr, func_value);
+  return 0;
 }
 
 
-bool get_partition_id_key_nosub(partition_info *part_info,
-                                uint32 *part_id)
+int get_partition_id_key_nosub(partition_info *part_info,
+                                uint32 *part_id,
+                                longlong *func_value)
 {
-  *part_id= get_part_id_key(part_info->part_field_array, part_info->no_parts);
-  return FALSE;
+  *part_id= get_part_id_key(part_info->part_field_array,
+                            part_info->no_parts, func_value);
+  return 0;
 }
 
 
-bool get_partition_id_linear_key_nosub(partition_info *part_info,
-                                       uint32 *part_id)
+int get_partition_id_linear_key_nosub(partition_info *part_info,
+                                       uint32 *part_id,
+                                       longlong *func_value)
 {
   *part_id= get_part_id_linear_key(part_info,
                                    part_info->part_field_array,
-                                   part_info->no_parts);
-  return FALSE;
+                                   part_info->no_parts, func_value);
+  return 0;
 }
 
 
-bool get_partition_id_range_sub_hash(partition_info *part_info,
-                                     uint32 *part_id)
+int get_partition_id_range_sub_hash(partition_info *part_info,
+                                     uint32 *part_id,
+                                     longlong *func_value)
 {
   uint32 loc_part_id, sub_part_id;
   uint no_subparts;
+  longlong local_func_value;
+  int error;
   DBUG_ENTER("get_partition_id_range_sub_hash");
-  if (unlikely(get_partition_id_range(part_info, &loc_part_id)))
+
+  if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
+                                              func_value))))
   {
-    DBUG_RETURN(TRUE);
+    DBUG_RETURN(error);
   }
   no_subparts= part_info->no_subparts;
-  sub_part_id= get_part_id_hash(no_subparts, part_info->subpart_expr);
+  sub_part_id= get_part_id_hash(no_subparts, part_info->subpart_expr,
+                                &local_func_value);
   *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
-  DBUG_RETURN(FALSE);
+  DBUG_RETURN(0);
 }
 
 
-bool get_partition_id_range_sub_linear_hash(partition_info *part_info,
-                                            uint32 *part_id)
+int get_partition_id_range_sub_linear_hash(partition_info *part_info,
+                                            uint32 *part_id,
+                                            longlong *func_value)
 {
   uint32 loc_part_id, sub_part_id;
   uint no_subparts;
+  longlong local_func_value;
+  int error;
   DBUG_ENTER("get_partition_id_range_sub_linear_hash");
-  if (unlikely(get_partition_id_range(part_info, &loc_part_id)))
+
+  if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
+                                              func_value))))
   {
-    DBUG_RETURN(TRUE);
+    DBUG_RETURN(error);
   }
   no_subparts= part_info->no_subparts;
   sub_part_id= get_part_id_linear_hash(part_info, no_subparts,
-                                       part_info->subpart_expr);
+                                       part_info->subpart_expr,
+                                       &local_func_value);
   *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
-  DBUG_RETURN(FALSE);
+  DBUG_RETURN(0);
 }
 
 
-bool get_partition_id_range_sub_key(partition_info *part_info,
-                                    uint32 *part_id)
+int get_partition_id_range_sub_key(partition_info *part_info,
+                                    uint32 *part_id,
+                                    longlong *func_value)
 {
   uint32 loc_part_id, sub_part_id;
   uint no_subparts;
+  longlong local_func_value;
+  int error;
   DBUG_ENTER("get_partition_id_range_sub_key");
-  if (unlikely(get_partition_id_range(part_info, &loc_part_id)))
+
+  if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
+                                              func_value))))
   {
-    DBUG_RETURN(TRUE);
+    DBUG_RETURN(error);
   }
   no_subparts= part_info->no_subparts;
-  sub_part_id= get_part_id_key(part_info->subpart_field_array, no_subparts);
+  sub_part_id= get_part_id_key(part_info->subpart_field_array,
+                               no_subparts, &local_func_value);
   *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
-  DBUG_RETURN(FALSE);
+  DBUG_RETURN(0);
 }
 
 
-bool get_partition_id_range_sub_linear_key(partition_info *part_info,
-                                           uint32 *part_id)
+int get_partition_id_range_sub_linear_key(partition_info *part_info,
+                                           uint32 *part_id,
+                                           longlong *func_value)
 {
   uint32 loc_part_id, sub_part_id;
   uint no_subparts;
+  longlong local_func_value;
+  int error;
   DBUG_ENTER("get_partition_id_range_sub_linear_key");
-  if (unlikely(get_partition_id_range(part_info, &loc_part_id)))
+
+  if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
+                                              func_value))))
   {
-    DBUG_RETURN(TRUE);
+    DBUG_RETURN(error);
   }
   no_subparts= part_info->no_subparts;
   sub_part_id= get_part_id_linear_key(part_info,
                                       part_info->subpart_field_array,
-                                      no_subparts);
+                                      no_subparts, &local_func_value);
   *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
-  DBUG_RETURN(FALSE);
+  DBUG_RETURN(0);
 }
 
 
-bool get_partition_id_list_sub_hash(partition_info *part_info,
-                                    uint32 *part_id)
+int get_partition_id_list_sub_hash(partition_info *part_info,
+                                    uint32 *part_id,
+                                    longlong *func_value)
 {
   uint32 loc_part_id, sub_part_id;
   uint no_subparts;
+  longlong local_func_value;
+  int error;
   DBUG_ENTER("get_partition_id_list_sub_hash");
-  if (unlikely(get_partition_id_list(part_info, &loc_part_id)))
+
+  if (unlikely((error= get_partition_id_list(part_info, &loc_part_id,
+                                             func_value))))
   {
-    DBUG_RETURN(TRUE);
+    DBUG_RETURN(error);
   }
   no_subparts= part_info->no_subparts;
-  sub_part_id= get_part_id_hash(no_subparts, part_info->subpart_expr);
+  sub_part_id= get_part_id_hash(no_subparts, part_info->subpart_expr,
+                                &local_func_value);
   *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
-  DBUG_RETURN(FALSE);
+  DBUG_RETURN(0);
 }
 
 
-bool get_partition_id_list_sub_linear_hash(partition_info *part_info,
-                                           uint32 *part_id)
+int get_partition_id_list_sub_linear_hash(partition_info *part_info,
+                                           uint32 *part_id,
+                                           longlong *func_value)
 {
   uint32 loc_part_id, sub_part_id;
   uint no_subparts;
+  longlong local_func_value;
+  int error;
   DBUG_ENTER("get_partition_id_list_sub_linear_hash");
-  if (unlikely(get_partition_id_list(part_info, &loc_part_id)))
+
+  if (unlikely((error= get_partition_id_list(part_info, &loc_part_id,
+                                             func_value))))
   {
-    DBUG_RETURN(TRUE);
+    DBUG_RETURN(error);
   }
   no_subparts= part_info->no_subparts;
-  sub_part_id= get_part_id_hash(no_subparts, part_info->subpart_expr);
+  sub_part_id= get_part_id_linear_hash(part_info, no_subparts,
+                                       part_info->subpart_expr,
+                                       &local_func_value);
   *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
-  DBUG_RETURN(FALSE);
+  DBUG_RETURN(0);
 }
 
 
-bool get_partition_id_list_sub_key(partition_info *part_info,
-                                   uint32 *part_id)
+int get_partition_id_list_sub_key(partition_info *part_info,
+                                   uint32 *part_id,
+                                   longlong *func_value)
 {
   uint32 loc_part_id, sub_part_id;
   uint no_subparts;
+  longlong local_func_value;
+  int error;
   DBUG_ENTER("get_partition_id_range_sub_key");
-  if (unlikely(get_partition_id_list(part_info, &loc_part_id)))
+
+  if (unlikely((error= get_partition_id_list(part_info, &loc_part_id,
+                                             func_value))))
   {
-    DBUG_RETURN(TRUE);
+    DBUG_RETURN(error);
   }
   no_subparts= part_info->no_subparts;
-  sub_part_id= get_part_id_key(part_info->subpart_field_array, no_subparts);
+  sub_part_id= get_part_id_key(part_info->subpart_field_array,
+                               no_subparts, &local_func_value);
   *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
-  DBUG_RETURN(FALSE);
+  DBUG_RETURN(0);
 }
 
 
-bool get_partition_id_list_sub_linear_key(partition_info *part_info,
-                                          uint32 *part_id)
+int get_partition_id_list_sub_linear_key(partition_info *part_info,
+                                          uint32 *part_id,
+                                          longlong *func_value)
 {
   uint32 loc_part_id, sub_part_id;
   uint no_subparts;
+  longlong local_func_value;
+  int error;
   DBUG_ENTER("get_partition_id_list_sub_linear_key");
-  if (unlikely(get_partition_id_list(part_info, &loc_part_id)))
+
+  if (unlikely((error= get_partition_id_list(part_info, &loc_part_id,
+                                             func_value))))
   {
-    DBUG_RETURN(TRUE);
+    DBUG_RETURN(error);
   }
   no_subparts= part_info->no_subparts;
   sub_part_id= get_part_id_linear_key(part_info,
                                       part_info->subpart_field_array,
-                                      no_subparts);
+                                      no_subparts, &local_func_value);
   *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
-  DBUG_RETURN(FALSE);
+  DBUG_RETURN(0);
 }
 
 
 /*
   This function is used to calculate the subpartition id
+
   SYNOPSIS
     get_subpartition_id()
     part_info           A reference to the partition_info struct where all the
                         desired information is given
+
   RETURN VALUE
-    part_id
-    The subpartition identity
+    part_id             The subpartition identity
+
   DESCRIPTION
     A routine used in some SELECT's when only partial knowledge of the
     partitions is known.
@@ -2881,38 +3338,45 @@
 
 uint32 get_partition_id_hash_sub(partition_info *part_info)
 {
-  return get_part_id_hash(part_info->no_subparts, part_info->subpart_expr);
+  longlong func_value;
+  return get_part_id_hash(part_info->no_subparts, part_info->subpart_expr,
+                          &func_value);
 }
 
 
 uint32 get_partition_id_linear_hash_sub(partition_info *part_info)
 {
+  longlong func_value;
   return get_part_id_linear_hash(part_info, part_info->no_subparts,
-                                 part_info->subpart_expr);
+                                 part_info->subpart_expr, &func_value);
 }
 
 
 uint32 get_partition_id_key_sub(partition_info *part_info)
 {
+  longlong func_value;
   return get_part_id_key(part_info->subpart_field_array,
-                         part_info->no_subparts);
+                         part_info->no_subparts, &func_value);
 }
 
 
 uint32 get_partition_id_linear_key_sub(partition_info *part_info)
 {
+  longlong func_value;
   return get_part_id_linear_key(part_info,
                                 part_info->subpart_field_array,
-                                part_info->no_subparts);
+                                part_info->no_subparts, &func_value);
 }
 
 
 /*
-  Set an indicator on all partition fields that are set by the key 
+  Set an indicator on all partition fields that are set by the key
+
   SYNOPSIS
     set_PF_fields_in_key()
     key_info                   Information about the index
     key_length                 Length of key
+
   RETURN VALUE
     TRUE                       Found partition field set by key
     FALSE                      No partition field set by key
@@ -2953,9 +3417,11 @@
 /*
   We have found that at least one partition field was set by a key, now
   check if a partition function has all its fields bound or not.
+
   SYNOPSIS
     check_part_func_bound()
     ptr                     Array of fields NULL terminated (partition fields)
+
   RETURN VALUE
     TRUE                    All fields in partition function are set
     FALSE                   Not all fields in partition function are set
@@ -2981,14 +3447,17 @@
 /*
   Get the id of the subpartitioning part by using the key buffer of the
   index scan.
+
   SYNOPSIS
     get_sub_part_id_from_key()
     table         The table object
     buf           A buffer that can be used to evaluate the partition function
     key_info      The index object
     key_spec      A key_range containing key and key length
+
   RETURN VALUES
     part_id       Subpartition id to use
+
   DESCRIPTION
     Use key buffer to set-up record in buf, move field pointers and
     get the partition identity and restore field pointers afterwards.
@@ -3019,36 +3488,43 @@
 /*
   Get the id of the partitioning part by using the key buffer of the
   index scan.
+
   SYNOPSIS
     get_part_id_from_key()
     table         The table object
     buf           A buffer that can be used to evaluate the partition function
     key_info      The index object
     key_spec      A key_range containing key and key length
-    part_id       Partition to use
+    out:part_id   Partition to use
+
   RETURN VALUES
     TRUE          Partition to use not found
     FALSE         Ok, part_id indicates partition to use
+
   DESCRIPTION
     Use key buffer to set-up record in buf, move field pointers and
     get the partition identity and restore field pointers afterwards.
 */
+
 bool get_part_id_from_key(const TABLE *table, byte *buf, KEY *key_info,
                           const key_range *key_spec, uint32 *part_id)
 {
   bool result;
   byte *rec0= table->record[0];
   partition_info *part_info= table->part_info;
+  longlong func_value;
   DBUG_ENTER("get_part_id_from_key");
 
   key_restore(buf, (byte*)key_spec->key, key_info, key_spec->length);
   if (likely(rec0 == buf))
-    result= part_info->get_part_partition_id(part_info, part_id);
+    result= part_info->get_part_partition_id(part_info, part_id,
+                                             &func_value);
   else
   {
     Field **part_field_array= part_info->part_field_array;
     set_field_ptr(part_field_array, buf, rec0);
-    result= part_info->get_part_partition_id(part_info, part_id);
+    result= part_info->get_part_partition_id(part_info, part_id,
+                                             &func_value);
     set_field_ptr(part_field_array, rec0, buf);
   }
   DBUG_RETURN(result);
@@ -3057,16 +3533,19 @@
 /*
   Get the partitioning id of the full PF by using the key buffer of the
   index scan.
+
   SYNOPSIS
     get_full_part_id_from_key()
     table         The table object
     buf           A buffer that is used to evaluate the partition function
     key_info      The index object
     key_spec      A key_range containing key and key length
-    part_spec     A partition id containing start part and end part
+    out:part_spec A partition id containing start part and end part
+
   RETURN VALUES
     part_spec
     No partitions to scan is indicated by end_part > start_part when returning
+
   DESCRIPTION
     Use key buffer to set-up record in buf, move field pointers if needed and
     get the partition identity and restore field pointers afterwards.
@@ -3080,16 +3559,19 @@
   bool result;
   partition_info *part_info= table->part_info;
   byte *rec0= table->record[0];
+  longlong func_value;
   DBUG_ENTER("get_full_part_id_from_key");
 
   key_restore(buf, (byte*)key_spec->key, key_info, key_spec->length);
   if (likely(rec0 == buf))
-    result= part_info->get_partition_id(part_info, &part_spec->start_part);
+    result= part_info->get_partition_id(part_info, &part_spec->start_part,
+                                        &func_value);
   else
   {
     Field **part_field_array= part_info->full_part_field_array;
     set_field_ptr(part_field_array, buf, rec0);
-    result= part_info->get_partition_id(part_info, &part_spec->start_part);
+    result= part_info->get_partition_id(part_info, &part_spec->start_part,
+                                        &func_value);
     set_field_ptr(part_field_array, rec0, buf);
   }
   part_spec->end_part= part_spec->start_part;
@@ -3100,14 +3582,16 @@
     
 /*
   Get the set of partitions to use in query.
+
   SYNOPSIS
     get_partition_set()
     table         The table object
     buf           A buffer that can be used to evaluate the partition function
     index         The index of the key used, if MAX_KEY no index used
     key_spec      A key_range containing key and key length
-    part_spec     Contains start part, end part and indicator if bitmap is
+    out:part_spec Contains start part, end part and indicator if bitmap is
                   used for which partitions to scan
+
   DESCRIPTION
     This function is called to discover which partitions to use in an index
     scan or a full table scan.
@@ -3117,6 +3601,7 @@
     If start_part > end_part at return it means no partition needs to be
     scanned. If start_part == end_part it always means a single partition
     needs to be scanned.
+
   RETURN VALUE
     part_spec
 */
@@ -3124,7 +3609,8 @@
                        const key_range *key_spec, part_id_range *part_spec)
 {
   partition_info *part_info= table->part_info;
-  uint no_parts= get_tot_partitions(part_info), i, part_id;
+  uint no_parts= get_tot_partitions(part_info);
+  uint i, part_id;
   uint sub_part= no_parts;
   uint32 part_part= no_parts;
   KEY *key_info= NULL;
@@ -3166,7 +3652,8 @@
           sub_part= get_sub_part_id_from_key(table, buf, key_info, key_spec);
         else if (part_info->all_fields_in_PPF.is_set(index))
         {
-          if (get_part_id_from_key(table,buf,key_info,key_spec,(uint32*)&part_part))
+          if (get_part_id_from_key(table,buf,key_info,
+                                   key_spec,(uint32*)&part_part))
           {
             /*
               The value of the RANGE or LIST partitioning was outside of
@@ -3201,15 +3688,18 @@
           clear_indicator_in_key_fields(key_info);
           DBUG_VOID_RETURN; 
         }
-        else if (check_part_func_bound(part_info->part_field_array))
-          sub_part= get_sub_part_id_from_key(table, buf, key_info, key_spec);
-        else if (check_part_func_bound(part_info->subpart_field_array))
+        else if (is_sub_partitioned(part_info))
         {
-          if (get_part_id_from_key(table,buf,key_info,key_spec,(uint32*)&part_part))
+          if (check_part_func_bound(part_info->subpart_field_array))
+            sub_part= get_sub_part_id_from_key(table, buf, key_info, key_spec);
+          else if (check_part_func_bound(part_info->part_field_array))
           {
-            part_spec->start_part= no_parts;
-            clear_indicator_in_key_fields(key_info);
-            DBUG_VOID_RETURN;
+            if (get_part_id_from_key(table,buf,key_info,key_spec,&part_part))
+            {
+              part_spec->start_part= no_parts;
+              clear_indicator_in_key_fields(key_info);
+              DBUG_VOID_RETURN;
+            }
           }
         }
       }
@@ -3278,10 +3768,10 @@
    | Forminfo     288 bytes      |
    -------------------------------
    | Screen buffer, to make      |
-   | field names readable        |
    -------------------------------
    | Packed field info           |
-   | 17 + 1 + strlen(field_name) |
    | + 1 end of file character   |
    -------------------------------
    | Partition info              |
@@ -3290,15 +3780,20 @@
 
    Read the partition syntax from the frm file and parse it to get the
    data structures of the partitioning.
+
    SYNOPSIS
      mysql_unpack_partition()
-     file                          File reference of frm file
      thd                           Thread object
+     part_buf                      Partition info from frm file
      part_info_len                 Length of partition syntax
      table                         Table object of partitioned table
+     create_table_ind              Is it called from CREATE TABLE
+     default_db_type               What is the default engine of the table
+
    RETURN VALUE
      TRUE                          Error
+     FALSE                         Success
+
    DESCRIPTION
      Read the partition syntax from the current position in the frm file.
      Initiate a LEX object, save the list of item tree objects to free after
@@ -3311,13 +3806,16 @@
 */
 
 bool mysql_unpack_partition(THD *thd, const uchar *part_buf,
-                            uint part_info_len, TABLE* table,
+                            uint part_info_len,
+                            uchar *part_state, uint part_state_len,
+                            TABLE* table, bool is_create_table_ind,
                             handlerton *default_db_type)
 {
   Item *thd_free_list= thd->free_list;
   bool result= TRUE;
   partition_info *part_info;
-  LEX *old_lex= thd->lex, lex;
+  LEX *old_lex= thd->lex;
+  LEX lex;
   DBUG_ENTER("mysql_unpack_partition");
 
   thd->lex= &lex;
@@ -3340,13 +3838,59 @@
     we then save in the partition info structure.
   */
   thd->free_list= NULL;
-  lex.part_info= (partition_info*)1; //Indicate yyparse from this place
+  lex.part_info= new partition_info();/* Indicates yyparse from this place */
+  if (!lex.part_info)
+  {
+    mem_alloc_error(sizeof(partition_info));
+    goto end;
+  }
+  lex.part_info->part_state= part_state;
+  lex.part_info->part_state_len= part_state_len;
+  DBUG_PRINT("info", ("Parse: %s", part_buf));
   if (yyparse((void*)thd) || thd->is_fatal_error)
   {
     free_items(thd->free_list);
     goto end;
   }
+  /*
+    The parsed syntax residing in the frm file can still contain defaults.
+    The reason is that the frm file is sometimes saved outside of this
+    MySQL Server and used in backup and restore of clusters or partitioned
+    tables. It is not certain that the restore will restore exactly the
+    same default partitioning.
+    
+    The easiest manner of handling this is to simply continue using the
+    part_info we already built up during mysql_create_table if we are
+    in the process of creating a table. If the table already exists we
+    need to discover the number of partitions for the default parts. Since
+    the handler object hasn't been created here yet we need to postpone this
+    to the fix_partition_func method.
+  */
+
+  DBUG_PRINT("info", ("Successful parse"));
   part_info= lex.part_info;
+  DBUG_PRINT("info", ("default engine = %d", ha_legacy_type(part_info->default_engine_type)));
+  if (is_create_table_ind)
+  {
+    if (old_lex->name)
+    {
+      /*
+        This code is executed when we do a CREATE TABLE t1 LIKE t2
+        old_lex->name contains the t2 and the table we are opening has 
+        name t1.
+      */
+      Table_ident *ti= (Table_ident*)old_lex->name;
+      const char *db_name= ti->db.str ? ti->db.str : thd->db;
+      const char *table_name= ti->table.str;
+      handler *file;
+      if (partition_default_handling(table, part_info))
+      {
+        DBUG_RETURN(TRUE);
+      }
+    }
+    else
+      part_info= old_lex->part_info;
+  }
   table->part_info= part_info;
   table->file->set_part_info(part_info);
   if (part_info->default_engine_type == NULL)
@@ -3369,30 +3913,25 @@
   */
     uint part_func_len= part_info->part_func_len;
     uint subpart_func_len= part_info->subpart_func_len; 
-    uint bitmap_bits= part_info->no_subparts? 
-                       (part_info->no_subparts* part_info->no_parts):
-                        part_info->no_parts;
-    uint bitmap_bytes= bitmap_buffer_size(bitmap_bits);
-    uint32 *bitmap_buf;
-    char *part_func_string, *subpart_func_string= NULL;
-    if (!((part_func_string= thd->alloc(part_func_len))) ||
+    char *part_func_string= NULL;
+    char *subpart_func_string= NULL;
+    if ((part_func_len &&
+        !((part_func_string= thd->alloc(part_func_len)))) ||
         (subpart_func_len &&
-        !((subpart_func_string= thd->alloc(subpart_func_len)))) ||
-        !((bitmap_buf= (uint32*)thd->alloc(bitmap_bytes))))
+        !((subpart_func_string= thd->alloc(subpart_func_len)))))
     {
-      my_error(ER_OUTOFMEMORY, MYF(0), part_func_len);
+      mem_alloc_error(part_func_len);
       free_items(thd->free_list);
       part_info->item_free_list= 0;
       goto end;
     }
-    memcpy(part_func_string, part_info->part_func_string, part_func_len);
+    if (part_func_len)
+      memcpy(part_func_string, part_info->part_func_string, part_func_len);
     if (subpart_func_len)
       memcpy(subpart_func_string, part_info->subpart_func_string,
              subpart_func_len);
     part_info->part_func_string= part_func_string;
     part_info->subpart_func_string= subpart_func_string;
-
-    bitmap_init(&part_info->used_partitions, bitmap_buf, bitmap_bytes*8, FALSE);
   }
 
   result= FALSE;
@@ -3401,16 +3940,1482 @@
   thd->lex= old_lex;
   DBUG_RETURN(result);
 }
+
+
+/*
+  SYNOPSIS
+    fast_alter_partition_error_handler()
+    lpt                           Container for parameters
+
+  RETURN VALUES
+    None
+
+  DESCRIPTION
+    Support routine to clean up after failures of on-line ALTER TABLE
+    for partition management.
+*/
+
+static void fast_alter_partition_error_handler(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+  DBUG_ENTER("fast_alter_partition_error_handler");
+  /* TODO: WL 2826 Error handling */
+  DBUG_VOID_RETURN;
+}
+
+
+/*
+  SYNOPSIS
+    fast_end_partition()
+    thd                           Thread object
+    out:copied                    Number of records copied
+    out:deleted                   Number of records deleted
+    table_list                    Table list with the one table in it
+    empty                         Has nothing been done
+    lpt                           Struct to be used by error handler
+
+  RETURN VALUES
+    FALSE                         Success
+    TRUE                          Failure
+
+  DESCRIPTION
+    Support routine to handle the successful cases for partition
+    management.
+*/
+
+static int fast_end_partition(THD *thd, ulonglong copied,
+                              ulonglong deleted,
+                              TABLE_LIST *table_list, bool is_empty,
+                              ALTER_PARTITION_PARAM_TYPE *lpt,
+                              bool written_bin_log)
+{
+  int error;
+  DBUG_ENTER("fast_end_partition");
+
+  thd->proc_info="end";
+  if (!is_empty)
+    query_cache_invalidate3(thd, table_list, 0);
+  error= ha_commit_stmt(thd);
+  if (ha_commit(thd))
+    error= 1;
+  if (!error || is_empty)
+  {
+    char tmp_name[80];
+    if ((!is_empty) && (!written_bin_log) &&
+        (!thd->lex->no_write_to_binlog))
+      write_bin_log(thd, FALSE, thd->query, thd->query_length);
+    close_thread_tables(thd);
+    my_snprintf(tmp_name, sizeof(tmp_name), ER(ER_INSERT_INFO),
+                (ulong) (copied + deleted),
+                (ulong) deleted,
+                (ulong) 0);
+    send_ok(thd,copied+deleted,0L,tmp_name);
+    DBUG_RETURN(FALSE);
+  }
+  fast_alter_partition_error_handler(lpt);
+  DBUG_RETURN(TRUE);
+}
+
+
+/*
+  Check engine mix that it is correct
+  SYNOPSIS
+    check_engine_condition()
+    p_elem                   Partition element
+    default_engine           Have user specified engine on table level
+    inout:engine_type        Current engine used
+    inout:first              Is it first partition
+  RETURN VALUE
+    TRUE                     Failed check
+    FALSE                    Ok
+  DESCRIPTION
+    (specified partition handler ) specified table handler
+    (NDB, NDB) NDB           OK
+    (MYISAM, MYISAM) -       OK
+    (MYISAM, -)      -       NOT OK
+    (MYISAM, -)    MYISAM    OK
+    (- , MYISAM)   -         NOT OK
+    (- , -)        MYISAM    OK
+    (-,-)          -         OK
+    (NDB, MYISAM) *          NOT OK
+*/
+
+static bool check_engine_condition(partition_element *p_elem,
+                                   bool default_engine,
+                                   handlerton **engine_type,
+                                   bool *first)
+{
+  if (*first && default_engine)
+    *engine_type= p_elem->engine_type;
+  *first= FALSE;
+  if ((!default_engine &&
+      (p_elem->engine_type != *engine_type &&
+       !p_elem->engine_type)) ||
+      (default_engine &&
+       p_elem->engine_type != *engine_type))
+    return TRUE;
+  else
+    return FALSE;
+}
+
+/*
+  We need to check if engine used by all partitions can handle
+  partitioning natively.
+
+  SYNOPSIS
+    check_native_partitioned()
+    create_info            Create info in CREATE TABLE
+    out:ret_val            Return value
+    part_info              Partition info
+    thd                    Thread object
+
+  RETURN VALUES
+  Value returned in bool ret_value
+    TRUE                   Native partitioning supported by engine
+    FALSE                  Need to use partition handler
+
+  Return value from function
+    TRUE                   Error
+    FALSE                  Success
+*/
+
+static bool check_native_partitioned(HA_CREATE_INFO *create_info,bool *ret_val,
+                                     partition_info *part_info, THD *thd)
+{
+  List_iterator<partition_element> part_it(part_info->partitions);
+  bool first= TRUE;
+  bool default_engine;
+  handlerton *engine_type= create_info->db_type;
+  handlerton *old_engine_type= engine_type;
+  uint i= 0;
+  handler *file;
+  uint no_parts= part_info->partitions.elements;
+  DBUG_ENTER("check_native_partitioned");
+
+  default_engine= (create_info->used_fields | HA_CREATE_USED_ENGINE) ?
+                   TRUE : FALSE;
+  DBUG_PRINT("info", ("engine_type = %u, default = %u",
+                       ha_legacy_type(engine_type),
+                       default_engine));
+  if (no_parts)
+  {
+    do
+    {
+      partition_element *part_elem= part_it++;
+      if (is_sub_partitioned(part_info) &&
+          part_elem->subpartitions.elements)
+      {
+        uint no_subparts= part_elem->subpartitions.elements;
+        uint j= 0;
+        List_iterator<partition_element> sub_it(part_elem->subpartitions);
+        do
+        {
+          partition_element *sub_elem= sub_it++;
+          if (check_engine_condition(sub_elem, default_engine,
+                                     &engine_type, &first))
+            goto error;
+        } while (++j < no_subparts);
+        /*
+          In case of subpartitioning and defaults we allow that only
+          subparts have specified engines, as long as the parts haven't
+          specified the wrong engine it's ok.
+        */
+        if (check_engine_condition(part_elem, FALSE,
+                                   &engine_type, &first))
+          goto error;
+      }
+      else if (check_engine_condition(part_elem, default_engine,
+                                      &engine_type, &first))
+        goto error;
+    } while (++i < no_parts);
+  }
+
+  /*
+    All engines are of the same type. Check if this engine supports
+    native partitioning.
+  */
+
+  if (!engine_type)
+    engine_type= old_engine_type;
+  DBUG_PRINT("info", ("engine_type = %s",
+              ha_resolve_storage_engine_name(engine_type)));
+  if (engine_type->partition_flags &&
+      (engine_type->partition_flags() & HA_CAN_PARTITION))
+  {
+    create_info->db_type= engine_type;
+    DBUG_PRINT("info", ("Changed to native partitioning"));
+    *ret_val= TRUE;
+  }
+  DBUG_RETURN(FALSE);
+error:
+  /*
+    Mixed engines not yet supported but when supported it will need
+    the partition handler
+  */
+  *ret_val= FALSE;
+  DBUG_RETURN(TRUE);
+}
+
+
+/*
+  Prepare for ALTER TABLE of partition structure
+
+  SYNOPSIS
+    prep_alter_part_table()
+    thd                        Thread object
+    table                      Table object
+    inout:alter_info           Alter information
+    inout:create_info          Create info for CREATE TABLE
+    old_db_type                Old engine type
+    out:partition_changed      Boolean indicating whether partition changed
+    out:fast_alter_partition   Boolean indicating whether fast partition
+                               change is requested
+
+  RETURN VALUES
+    TRUE                       Error
+    FALSE                      Success
+    partition_changed
+    fast_alter_partition
+
+  DESCRIPTION
+    This method handles all preparations for ALTER TABLE for partitioned
+    tables
+    We need to handle both partition management command such as Add Partition
+    and others here as well as an ALTER TABLE that completely changes the
+    partitioning and yet others that don't change anything at all. We start
+    by checking the partition management variants and then check the general
+    change patterns.
+*/
+
+uint prep_alter_part_table(THD *thd, TABLE *table, ALTER_INFO *alter_info,
+                           HA_CREATE_INFO *create_info,
+                           handlerton *old_db_type,
+                           bool *partition_changed,
+                           uint *fast_alter_partition)
+{
+  DBUG_ENTER("prep_alter_part_table");
+
+  if (alter_info->flags &
+      (ALTER_ADD_PARTITION | ALTER_DROP_PARTITION |
+       ALTER_COALESCE_PARTITION | ALTER_REORGANIZE_PARTITION |
+       ALTER_TABLE_REORG | ALTER_OPTIMIZE_PARTITION |
+       ALTER_CHECK_PARTITION | ALTER_ANALYZE_PARTITION |
+       ALTER_REPAIR_PARTITION | ALTER_REBUILD_PARTITION))
+  {
+    partition_info *tab_part_info= table->part_info;
+    if (!tab_part_info)
+    {
+      my_error(ER_PARTITION_MGMT_ON_NONPARTITIONED, MYF(0));
+      DBUG_RETURN(TRUE);
+    }
+    /*
+      We are going to manipulate the partition info on the table object
+      so we need to ensure that the data structure of the table object
+      is freed by setting version to 0. table->s->version= 0 forces a
+      flush of the table object in close_thread_tables().
+    */
+    uint flags;
+    table->s->version= 0L;
+    if (alter_info->flags == ALTER_TABLE_REORG)
+    {
+      uint new_part_no, curr_part_no;
+      ulonglong max_rows= table->s->max_rows;
+      if (tab_part_info->part_type != HASH_PARTITION ||
+          tab_part_info->use_default_no_partitions)
+      {
+        my_error(ER_REORG_NO_PARAM_ERROR, MYF(0));
+        DBUG_RETURN(TRUE);
+      }
+      new_part_no= table->file->get_default_no_partitions(max_rows);
+      curr_part_no= tab_part_info->no_parts;
+      if (new_part_no == curr_part_no)
+      {
+        /*
+          No change is needed, we will have the same number of partitions
+          after the change as before. Thus we can reply ok immediately
+          without any changes at all.
+        */
+        DBUG_RETURN(fast_end_partition(thd, ULL(0), ULL(0), NULL,
+                                       TRUE, NULL, FALSE));
+      }
+      else if (new_part_no > curr_part_no)
+      {
+        /*
+          We will add more partitions, we use the ADD PARTITION without
+          setting the flag for no default number of partitions
+        */
+        alter_info->flags|= ALTER_ADD_PARTITION;
+        thd->lex->part_info->no_parts= new_part_no - curr_part_no;
+      }
+      else
+      {
+        /*
+          We will remove hash partitions, we use the COALESCE PARTITION
+          without setting the flag for no default number of partitions
+        */
+        alter_info->flags|= ALTER_COALESCE_PARTITION;
+        alter_info->no_parts= curr_part_no - new_part_no;
+      }
+    }
+    if (table->s->db_type->alter_table_flags &&
+        (!(flags= table->s->db_type->alter_table_flags(alter_info->flags))))
+    {
+      my_error(ER_PARTITION_FUNCTION_FAILURE, MYF(0));
+      DBUG_RETURN(1);
+    }
+    *fast_alter_partition= flags ^ HA_PARTITION_FUNCTION_SUPPORTED;
+    if (alter_info->flags & ALTER_ADD_PARTITION)
+    {
+      /*
+        We start by moving the new partitions to the list of temporary
+        partitions. We will then check that the new partitions fit in the
+        partitioning scheme as currently set-up.
+        Partitions are always added at the end in ADD PARTITION.
+      */
+      partition_info *alt_part_info= thd->lex->part_info;
+      uint no_new_partitions= alt_part_info->no_parts;
+      uint no_orig_partitions= tab_part_info->no_parts;
+      uint check_total_partitions= no_new_partitions + no_orig_partitions;
+      uint new_total_partitions= check_total_partitions;
+      /*
+        We allow quite a lot of values to be supplied by defaults, however we
+        must know the number of new partitions in this case.
+      */
+      if (thd->lex->no_write_to_binlog &&
+          tab_part_info->part_type != HASH_PARTITION)
+      {
+        my_error(ER_NO_BINLOG_ERROR, MYF(0));
+        DBUG_RETURN(TRUE);
+      } 
+      if (no_new_partitions == 0)
+      {
+        my_error(ER_ADD_PARTITION_NO_NEW_PARTITION, MYF(0));
+        DBUG_RETURN(TRUE);
+      }
+      if (is_sub_partitioned(tab_part_info))
+      {
+        if (alt_part_info->no_subparts == 0)
+          alt_part_info->no_subparts= tab_part_info->no_subparts;
+        else if (alt_part_info->no_subparts != tab_part_info->no_subparts)
+        {
+          my_error(ER_ADD_PARTITION_SUBPART_ERROR, MYF(0));
+          DBUG_RETURN(TRUE);
+        }
+        check_total_partitions= new_total_partitions*
+                                alt_part_info->no_subparts;
+      }
+      if (check_total_partitions > MAX_PARTITIONS)
+      {
+        my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
+        DBUG_RETURN(TRUE);
+      }
+      alt_part_info->part_type= tab_part_info->part_type;
+      if (set_up_defaults_for_partitioning(alt_part_info,
+                                           table->file,
+                                           ULL(0),
+                                           tab_part_info->no_parts))
+      {
+        DBUG_RETURN(TRUE);
+      }
+/*
+Handling of on-line cases:
+
+ADD PARTITION for RANGE/LIST PARTITIONING:
+------------------------------------------
+For range and list partitions add partition is simply adding a
+new empty partition to the table. If the handler support this we
+will use the simple method of doing this. The figure below shows
+an example of this and the states involved in making this change.
+            
+Existing partitions                                     New added partitions
+------       ------        ------        ------      |  ------    ------
+|    |       |    |        |    |        |    |      |  |    |    |    |
+| p0 |       | p1 |        | p2 |        | p3 |      |  | p4 |    | p5 |
+------       ------        ------        ------      |  ------    ------
+PART_NORMAL  PART_NORMAL   PART_NORMAL   PART_NORMAL    PART_TO_BE_ADDED*2
+PART_NORMAL  PART_NORMAL   PART_NORMAL   PART_NORMAL    PART_IS_ADDED*2
+
+The first line is the states before adding the new partitions and the 
+second line is after the new partitions are added. All the partitions are
+in the partitions list, no partitions are placed in the temp_partitions
+list.
+
+ADD PARTITION for HASH PARTITIONING
+-----------------------------------
+This little figure tries to show the various partitions involved when
+adding two new partitions to a linear hash based partitioned table with
+four partitions to start with, which lists are used and the states they
+pass through. Adding partitions to a normal hash based is similar except
+that it is always all the existing partitions that are reorganised not
+only a subset of them.
+
+Existing partitions                                     New added partitions
+------       ------        ------        ------      |  ------    ------
+|    |       |    |        |    |        |    |      |  |    |    |    |
+| p0 |       | p1 |        | p2 |        | p3 |      |  | p4 |    | p5 |
+------       ------        ------        ------      |  ------    ------
+PART_CHANGED PART_CHANGED  PART_NORMAL   PART_NORMAL    PART_TO_BE_ADDED
+PART_IS_CHANGED*2          PART_NORMAL   PART_NORMAL    PART_IS_ADDED
+PART_NORMAL  PART_NORMAL   PART_NORMAL   PART_NORMAL    PART_IS_ADDED
+
+Reorganised existing partitions
+------      ------
+|    |      |    |
+| p0'|      | p1'|
+------      ------
+
+p0 - p5 will be in the partitions list of partitions.
+p0' and p1' will actually not exist as separate objects, their presence can
+be deduced from the state of the partition and also the names of those
+partitions can be deduced this way.
+
+After adding the partitions and copying the partition data to p0', p1',
+p4 and p5 from p0 and p1 the states change to adapt for the new situation
+where p0 and p1 is dropped and replaced by p0' and p1' and the new p4 and
+p5 are in the table again.
+
+The first line above shows the states of the partitions before we start
+adding and copying partitions, the second after completing the adding
+and copying and finally the third line after also dropping the partitions
+that are reorganised.
+*/
+      if (*fast_alter_partition &&
+          tab_part_info->part_type == HASH_PARTITION)
+      {
+        uint part_no= 0, start_part= 1, start_sec_part= 1;
+        uint end_part= 0, end_sec_part= 0;
+        uint upper_2n= tab_part_info->linear_hash_mask + 1;
+        uint lower_2n= upper_2n >> 1;
+        bool all_parts= TRUE;
+        if (tab_part_info->linear_hash_ind &&
+            no_new_partitions < upper_2n)
+        {
+          /*
+            An analysis of which parts needs reorganisation shows that it is
+            divided into two intervals. The first interval is those parts
+            that are reorganised up until upper_2n - 1. From upper_2n and
+            onwards it starts again from partition 0 and goes on until
+            it reaches p(upper_2n - 1). If the last new partition reaches
+            beyond upper_2n - 1 then the first interval will end with
+            p(lower_2n - 1) and start with p(no_orig_partitions - lower_2n).
+            If lower_2n partitions are added then p0 to p(lower_2n - 1) will
+            be reorganised which means that the two intervals become one
+            interval at this point. Thus only when adding less than
+            lower_2n partitions and going beyond a total of upper_2n we
+            actually get two intervals.
+
+            To exemplify this assume we have 6 partitions to start with and
+            add 1, 2, 3, 5, 6, 7, 8, 9 partitions.
+            The first to add after p5 is p6 = 110 in bit numbers. Thus we
+            can see that 10 = p2 will be partition to reorganise if only one
+            partition.
+            If 2 partitions are added we reorganise [p2, p3]. Those two
+            cases are covered by the second if part below.
+            If 3 partitions are added we reorganise [p2, p3] U [p0,p0]. This
+            part is covered by the else part below.
+            If 5 partitions are added we get [p2,p3] U [p0, p2] = [p0, p3].
+            This is covered by the first if part where we need the max check
+            to here use lower_2n - 1.
+            If 7 partitions are added we get [p2,p3] U [p0, p4] = [p0, p4].
+            This is covered by the first if part but here we use the first
+            calculated end_part.
+            Finally with 9 new partitions we would also reorganise p6 if we
+            used the method below but we cannot reorganise more partitions
+            than what we had from the start and thus we simply set all_parts
+            to TRUE. In this case we don't get into this if-part at all.
+          */
+          all_parts= FALSE;
+          if (no_new_partitions >= lower_2n)
+          {
+            /*
+              In this case there is only one interval since the two intervals
+              overlap and this starts from zero to last_part_no - upper_2n
+            */
+            start_part= 0;
+            end_part= new_total_partitions - (upper_2n + 1);
+            end_part= max(lower_2n - 1, end_part);
+          }
+          else if (new_total_partitions <= upper_2n)
+          {
+            /*
+              Also in this case there is only one interval since we are not
+              going over a 2**n boundary
+            */
+            start_part= no_orig_partitions - lower_2n;
+            end_part= start_part + (no_new_partitions - 1);
+          }
+          else
+          {
+            /* We have two non-overlapping intervals since we are not
+               passing a 2**n border and we have not at least lower_2n
+               new parts that would ensure that the intervals become
+               overlapping.
+            */
+            start_part= no_orig_partitions - lower_2n;
+            end_part= upper_2n - 1;
+            start_sec_part= 0;
+            end_sec_part= new_total_partitions - (upper_2n + 1);
+          }
+        }
+        List_iterator<partition_element> tab_it(tab_part_info->partitions);
+        part_no= 0;
+        do
+        {
+          partition_element *p_elem= tab_it++;
+          if (all_parts ||
+              (part_no >= start_part && part_no <= end_part) ||
+              (part_no >= start_sec_part && part_no <= end_sec_part))
+          {
+            p_elem->part_state= PART_CHANGED;
+          }
+        } while (++part_no < no_orig_partitions);
+      }
+      /*
+        Need to concatenate the lists here to make it possible to check the
+        partition info for correctness using check_partition_info.
+        For on-line add partition we set the state of this partition to
+        PART_TO_BE_ADDED to ensure that it is known that it is not yet
+        usable (becomes usable when partition is created and the switch of
+        partition configuration is made).
+      */
+      {
+        List_iterator<partition_element> alt_it(alt_part_info->partitions);
+        uint part_count= 0;
+        do
+        {
+          partition_element *part_elem= alt_it++;
+          if (*fast_alter_partition)
+            part_elem->part_state= PART_TO_BE_ADDED;
+          if (tab_part_info->partitions.push_back(part_elem))
+          {
+            mem_alloc_error(1);
+            DBUG_RETURN(TRUE);
+          }
+        } while (++part_count < no_new_partitions);
+        tab_part_info->no_parts+= no_new_partitions;
+      }
+      /*
+        If we specify partitions explicitly we don't use defaults anymore.
+        Using ADD PARTITION also means that we don't have the default number
+        of partitions anymore. We use this code also for Table reorganisations
+        and here we don't set any default flags to FALSE.
+      */
+      if (!(alter_info->flags & ALTER_TABLE_REORG))
+      {
+        if (!alt_part_info->use_default_partitions)
+        {
+          DBUG_PRINT("info", ("part_info= %x", tab_part_info));
+          tab_part_info->use_default_partitions= FALSE;
+        }
+        tab_part_info->use_default_no_partitions= FALSE;
+      }
+    }
+    else if (alter_info->flags == ALTER_DROP_PARTITION)
+    {
+      /*
+        Drop a partition from a range partition and list partitioning is
+        always safe and can be made more or less immediate. It is necessary
+        however to ensure that the partition to be removed is safely removed
+        and that REPAIR TABLE can remove the partition if for some reason the
+        command to drop the partition failed in the middle.
+      */
+      uint part_count= 0;
+      uint no_parts_dropped= alter_info->partition_names.elements;
+      uint no_parts_found= 0;
+      List_iterator<partition_element> part_it(tab_part_info->partitions);
+      if (!(tab_part_info->part_type == RANGE_PARTITION ||
+            tab_part_info->part_type == LIST_PARTITION))
+      {
+        my_error(ER_ONLY_ON_RANGE_LIST_PARTITION, MYF(0), "DROP");
+        DBUG_RETURN(TRUE);
+      }
+      if (no_parts_dropped >= tab_part_info->no_parts)
+      {
+        my_error(ER_DROP_LAST_PARTITION, MYF(0));
+        DBUG_RETURN(TRUE);
+      }
+      do
+      {
+        partition_element *part_elem= part_it++;
+        if (is_name_in_list(part_elem->partition_name,
+                            alter_info->partition_names))
+        {
+          /*
+            Set state to indicate that the partition is to be dropped.
+          */
+          no_parts_found++;
+          part_elem->part_state= PART_TO_BE_DROPPED;
+        }
+      } while (++part_count < tab_part_info->no_parts);
+      if (no_parts_found != no_parts_dropped)
+      {
+        my_error(ER_DROP_PARTITION_NON_EXISTENT, MYF(0), "DROP");
+        DBUG_RETURN(TRUE);
+      }
+      if (table->file->is_fk_defined_on_table_or_index(MAX_KEY))
+      {
+        my_error(ER_ROW_IS_REFERENCED, MYF(0));
+        DBUG_RETURN(TRUE);
+      }
+    }
+    else if ((alter_info->flags & ALTER_OPTIMIZE_PARTITION) ||
+             (alter_info->flags & ALTER_ANALYZE_PARTITION) ||
+             (alter_info->flags & ALTER_CHECK_PARTITION) ||
+             (alter_info->flags & ALTER_REPAIR_PARTITION) ||
+             (alter_info->flags & ALTER_REBUILD_PARTITION))
+    {
+      uint no_parts_opt= alter_info->partition_names.elements;
+      uint part_count= 0;
+      uint no_parts_found= 0;
+      List_iterator<partition_element> part_it(tab_part_info->partitions);
+
+      do
+      {
+        partition_element *part_elem= part_it++;
+        if ((alter_info->flags & ALTER_ALL_PARTITION) ||
+            (is_name_in_list(part_elem->partition_name,
+                             alter_info->partition_names)))
+        {
+          /*
+            Mark the partition as a partition to be "changed" by
+            analyzing/optimizing/rebuilding/checking/repairing
+          */
+          no_parts_found++;
+          part_elem->part_state= PART_CHANGED;
+        }
+      } while (++part_count < tab_part_info->no_parts);
+      if (no_parts_found != no_parts_opt &&
+          (!(alter_info->flags & ALTER_ALL_PARTITION)))
+      {
+        const char *ptr;
+        if (alter_info->flags & ALTER_OPTIMIZE_PARTITION)
+          ptr= "OPTIMIZE";
+        else if (alter_info->flags & ALTER_ANALYZE_PARTITION)
+          ptr= "ANALYZE";
+        else if (alter_info->flags & ALTER_CHECK_PARTITION)
+          ptr= "CHECK";
+        else if (alter_info->flags & ALTER_REPAIR_PARTITION)
+          ptr= "REPAIR";
+        else
+          ptr= "REBUILD";
+        my_error(ER_DROP_PARTITION_NON_EXISTENT, MYF(0), ptr);
+        DBUG_RETURN(TRUE);
+      }
+    }
+    else if (alter_info->flags & ALTER_COALESCE_PARTITION)
+    {
+      uint no_parts_coalesced= alter_info->no_parts;
+      uint no_parts_remain= tab_part_info->no_parts - no_parts_coalesced;
+      List_iterator<partition_element> part_it(tab_part_info->partitions);
+      if (tab_part_info->part_type != HASH_PARTITION)
+      {
+        my_error(ER_COALESCE_ONLY_ON_HASH_PARTITION, MYF(0));
+        DBUG_RETURN(TRUE);
+      }
+      if (no_parts_coalesced == 0)
+      {
+        my_error(ER_COALESCE_PARTITION_NO_PARTITION, MYF(0));
+        DBUG_RETURN(TRUE);
+      }
+      if (no_parts_coalesced >= tab_part_info->no_parts)
+      {
+        my_error(ER_DROP_LAST_PARTITION, MYF(0));
+        DBUG_RETURN(TRUE);
+      }
+/*
+Online handling:
+COALESCE PARTITION:
+-------------------
+The figure below shows the manner in which partitions are handled when
+performing an on-line coalesce partition and which states they go through
+at start, after adding and copying partitions and finally after dropping
+the partitions to drop. The figure shows an example using four partitions
+to start with, using linear hash and coalescing one partition (always the
+last partition).
+
+Using linear hash then all remaining partitions will have a new reorganised
+part.
+
+Existing partitions                     Coalesced partition 
+------       ------              ------   |      ------
+|    |       |    |              |    |   |      |    |
+| p0 |       | p1 |              | p2 |   |      | p3 |
+------       ------              ------   |      ------
+PART_NORMAL  PART_CHANGED        PART_NORMAL     PART_REORGED_DROPPED
+PART_NORMAL  PART_IS_CHANGED     PART_NORMAL     PART_TO_BE_DROPPED
+PART_NORMAL  PART_NORMAL         PART_NORMAL     PART_IS_DROPPED
+
+Reorganised existing partitions
+            ------
+            |    |
+            | p1'|
+            ------
+
+p0 - p3 is in the partitions list.
+The p1' partition will actually not be in any list it is deduced from the
+state of p1.
+*/
+      {
+        uint part_count= 0, start_part= 1, start_sec_part= 1;
+        uint end_part= 0, end_sec_part= 0;
+        bool all_parts= TRUE;
+        if (*fast_alter_partition &&
+            tab_part_info->linear_hash_ind)
+        {
+          uint upper_2n= tab_part_info->linear_hash_mask + 1;
+          uint lower_2n= upper_2n >> 1;
+          all_parts= FALSE;
+          if (no_parts_coalesced >= lower_2n)
+          {
+            all_parts= TRUE;
+          }
+          else if (no_parts_remain >= lower_2n)
+          {
+            end_part= tab_part_info->no_parts - (lower_2n + 1);
+            start_part= no_parts_remain - lower_2n;
+          }
+          else
+          {
+            start_part= 0;
+            end_part= tab_part_info->no_parts - (lower_2n + 1);
+            end_sec_part= (lower_2n >> 1) - 1;
+            start_sec_part= end_sec_part - (lower_2n - (no_parts_remain + 1));
+          }
+        }
+        do
+        {
+          partition_element *p_elem= part_it++;
+          if (*fast_alter_partition &&
+              (all_parts ||
+              (part_count >= start_part && part_count <= end_part) ||
+              (part_count >= start_sec_part && part_count <= end_sec_part)))
+            p_elem->part_state= PART_CHANGED;
+          if (++part_count > no_parts_remain)
+          {
+            if (*fast_alter_partition)
+              p_elem->part_state= PART_REORGED_DROPPED;
+            else
+              part_it.remove();
+          }
+        } while (part_count < tab_part_info->no_parts);
+        tab_part_info->no_parts= no_parts_remain;
+      }
+      if (!(alter_info->flags & ALTER_TABLE_REORG))
+        tab_part_info->use_default_no_partitions= FALSE;
+    }
+    else if (alter_info->flags == ALTER_REORGANIZE_PARTITION)
+    {
+      /*
+        Reorganise partitions takes a number of partitions that are next
+        to each other (at least for RANGE PARTITIONS) and then uses those
+        to create a set of new partitions. So data is copied from those
+        partitions into the new set of partitions. Those new partitions
+        can have more values in the LIST value specifications or less both
+        are allowed. The ranges can be different but since they are 
+        changing a set of consecutive partitions they must cover the same
+        range as those changed from.
+        This command can be used on RANGE and LIST partitions.
+      */
+      uint no_parts_reorged= alter_info->partition_names.elements;
+      uint no_parts_new= thd->lex->part_info->partitions.elements;
+      partition_info *alt_part_info= thd->lex->part_info;
+      uint check_total_partitions;
+      if (no_parts_reorged > tab_part_info->no_parts)
+      {
+        my_error(ER_REORG_PARTITION_NOT_EXIST, MYF(0));
+        DBUG_RETURN(TRUE);
+      }
+      if (!(tab_part_info->part_type == RANGE_PARTITION ||
+            tab_part_info->part_type == LIST_PARTITION) &&
+           (no_parts_new != no_parts_reorged))
+      {
+        my_error(ER_REORG_HASH_ONLY_ON_SAME_NO, MYF(0));
+        DBUG_RETURN(TRUE);
+      }
+      check_total_partitions= tab_part_info->no_parts + no_parts_new;
+      check_total_partitions-= no_parts_reorged;
+      if (check_total_partitions > MAX_PARTITIONS)
+      {
+        my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
+        DBUG_RETURN(TRUE);
+      }
+/*
+Online handling:
+REORGANIZE PARTITION:
+---------------------
+The figure exemplifies the handling of partitions, their state changes and
+how they are organised. It exemplifies four partitions where two of the
+partitions are reorganised (p1 and p2) into two new partitions (p4 and p5).
+The reason of this change could be to change range limits, change list
+values or for hash partitions simply reorganise the partition which could
+also involve moving them to new disks or new node groups (MySQL Cluster).
+
+Existing partitions                                  
+------       ------        ------        ------
+|    |       |    |        |    |        |    |
+| p0 |       | p1 |        | p2 |        | p3 |
+------       ------        ------        ------
+PART_NORMAL  PART_TO_BE_REORGED          PART_NORMAL
+PART_NORMAL  PART_TO_BE_DROPPED          PART_NORMAL
+PART_NORMAL  PART_IS_DROPPED             PART_NORMAL
+
+Reorganised new partitions (replacing p1 and p2)
+------      ------
+|    |      |    |
+| p4 |      | p5 |
+------      ------
+PART_TO_BE_ADDED
+PART_IS_ADDED
+PART_IS_ADDED
+
+All unchanged partitions and the new partitions are in the partitions list
+in the order they will have when the change is completed. The reorganised
+partitions are placed in the temp_partitions list. PART_IS_ADDED is only a
+temporary state not written in the frm file. It is used to ensure we write
+the generated partition syntax in a correct manner.
+*/
+      {
+        List_iterator<partition_element> tab_it(tab_part_info->partitions);
+        uint part_count= 0;
+        bool found_first= FALSE;
+        bool found_last= FALSE;
+        bool is_last_partition_reorged;
+        uint drop_count= 0;
+        longlong tab_max_range= 0, alt_max_range= 0;
+        do
+        {
+          partition_element *part_elem= tab_it++;
+          is_last_partition_reorged= FALSE;
+          if (is_name_in_list(part_elem->partition_name,
+                              alter_info->partition_names))
+          {
+            is_last_partition_reorged= TRUE;
+            drop_count++;
+            tab_max_range= part_elem->range_value;
+            if (*fast_alter_partition &&
+                tab_part_info->temp_partitions.push_back(part_elem))
+            {
+              mem_alloc_error(1);
+              DBUG_RETURN(TRUE);
+            }
+            if (*fast_alter_partition)
+              part_elem->part_state= PART_TO_BE_REORGED;
+            if (!found_first)
+            {
+              uint alt_part_count= 0;
+              found_first= TRUE;
+              List_iterator<partition_element>
+                                 alt_it(alt_part_info->partitions);
+              do
+              {
+                partition_element *alt_part_elem= alt_it++;
+                alt_max_range= alt_part_elem->range_value;
+                if (*fast_alter_partition)
+                  alt_part_elem->part_state= PART_TO_BE_ADDED;
+                if (alt_part_count == 0)
+                  tab_it.replace(alt_part_elem);
+                else
+                  tab_it.after(alt_part_elem);
+              } while (++alt_part_count < no_parts_new);
+            }
+            else if (found_last)
+            {
+              my_error(ER_CONSECUTIVE_REORG_PARTITIONS, MYF(0));
+              DBUG_RETURN(TRUE);
+            }
+            else
+              tab_it.remove();
+          }
+          else
+          {
+            if (found_first)
+              found_last= TRUE;
+          }
+        } while (++part_count < tab_part_info->no_parts);
+        if (drop_count != no_parts_reorged)
+        {
+          my_error(ER_DROP_PARTITION_NON_EXISTENT, MYF(0), "REORGANIZE");
+          DBUG_RETURN(TRUE);
+        }
+        if (tab_part_info->part_type == RANGE_PARTITION &&
+            ((is_last_partition_reorged &&
+               alt_max_range < tab_max_range) ||
+              (!is_last_partition_reorged &&
+               alt_max_range != tab_max_range)))
+        {
+          /*
+            For range partitioning the total resulting range before and
+            after the change must be the same except in one case. This is
+            when the last partition is reorganised, in this case it is
+            acceptable to increase the total range.
+            The reason is that it is not allowed to have "holes" in the
+            middle of the ranges and thus we should not allow to reorganise
+            to create "holes". Also we should not allow using REORGANIZE
+            to drop data.
+          */
+          my_error(ER_REORG_OUTSIDE_RANGE, MYF(0));
+          DBUG_RETURN(TRUE);
+        }
+        tab_part_info->no_parts= check_total_partitions;
+      }
+    }
+    else
+    {
+      DBUG_ASSERT(FALSE);
+    }
+    *partition_changed= TRUE;
+    create_info->db_type= &partition_hton;
+    thd->lex->part_info= tab_part_info;
+    if (alter_info->flags == ALTER_ADD_PARTITION ||
+        alter_info->flags == ALTER_REORGANIZE_PARTITION)
+    {
+      if (check_partition_info(tab_part_info, (handlerton**)NULL,
+                               table->file, ULL(0)))
+      {
+        DBUG_RETURN(TRUE);
+      }
+    }
+  }
+  else
+  {
+    /*
+     When thd->lex->part_info has a reference to a partition_info the
+     ALTER TABLE contained a definition of a partitioning.
+
+     Case I:
+       If there was a partition before and there is a new one defined.
+       We use the new partitioning. The new partitioning is already
+       defined in the correct variable so no work is needed to
+       accomplish this.
+       We do however need to update partition_changed to ensure that not
+       only the frm file is changed in the ALTER TABLE command.
+
+     Case IIa:
+       There was a partitioning before and there is no new one defined.
+       Also the user has not specified an explicit engine to use.
+
+       We use the old partitioning also for the new table. We do this
+       by assigning the partition_info from the table loaded in
+       open_ltable to the partition_info struct used by mysql_create_table
+       later in this method.
+
+     Case IIb:
+       There was a partitioning before and there is no new one defined.
+       The user has specified an explicit engine to use.
+
+       Since the user has specified an explicit engine to use we override
+       the old partitioning info and create a new table using the specified
+       engine. This is the reason for the extra check if old and new engine
+       is equal.
+       In this case the partition also is changed.
+
+     Case III:
+       There was no partitioning before altering the table, there is
+       partitioning defined in the altered table. Use the new partitioning.
+       No work needed since the partitioning info is already in the
+       correct variable.
+
+       In this case we discover one case where the new partitioning is using
+       the same partition function as the default (PARTITION BY KEY or
+       PARTITION BY LINEAR KEY with the list of fields equal to the primary
+       key fields OR PARTITION BY [LINEAR] KEY() for tables without primary
+       key)
+       Also here partition has changed and thus a new table must be
+       created.
+
+     Case IV:
+       There was no partitioning before and no partitioning defined.
+       Obviously no work needed.
+    */
+    if (table->part_info)
+    {
+      if (!thd->lex->part_info &&
+          create_info->db_type == old_db_type)
+        thd->lex->part_info= table->part_info;
+    }
+    if (thd->lex->part_info)
+    {
+      /*
+        Need to cater for engine types that can handle partition without
+        using the partition handler.
+      */
+      if (thd->lex->part_info != table->part_info)
+        *partition_changed= TRUE;
+      if (create_info->db_type == &partition_hton)
+      {
+        if (table->part_info)
+        {
+          thd->lex->part_info->default_engine_type=
+                               table->part_info->default_engine_type;
+        }
+        else
+        {
+          thd->lex->part_info->default_engine_type= 
+                           ha_checktype(thd, DB_TYPE_DEFAULT, FALSE, FALSE);
+        }
+      }
+      else
+      {
+        bool is_native_partitioned= FALSE;
+        partition_info *part_info= thd->lex->part_info;
+        part_info->default_engine_type= create_info->db_type;
+        if (check_native_partitioned(create_info, &is_native_partitioned,
+                                     part_info, thd))
+        {
+          DBUG_RETURN(TRUE);
+        }
+        if (!is_native_partitioned)
+        {
+          DBUG_ASSERT(create_info->db_type != &default_hton);
+          create_info->db_type= &partition_hton;
+        }
+      }
+      DBUG_PRINT("info", ("default_db_type = %s",
+                 thd->lex->part_info->default_engine_type->name));
+    }
+  }
+  DBUG_RETURN(FALSE);
+}
+
+
+/*
+  Change partitions, used to implement ALTER TABLE ADD/REORGANIZE/COALESCE
+  partitions. Serves both the single-phase and the multi-phase variants of
+  these operations.
+
+  SYNOPSIS
+    mysql_change_partitions()
+    lpt                        Struct containing parameters
+
+  RETURN VALUES
+    TRUE                          Failure
+    FALSE                         Success
+
+  DESCRIPTION
+    Builds the table file path and asks the handler to perform the
+    partition changes described by the partition states set up earlier.
+
+    Elements of the lpt parameters used:
+    create_info                Create information used to create partitions
+    db                         Database name
+    table_name                 Table name
+    copied                     Output parameter where number of copied
+                               records are added
+    deleted                    Output parameter where number of deleted
+                               records are added
+*/
+
+static bool mysql_change_partitions(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+  char part_path[FN_REFLEN+1];
+  bool res;
+  DBUG_ENTER("mysql_change_partitions");
+
+  /* The handler works on the table path, not on an open table name. */
+  build_table_filename(part_path, sizeof(part_path), lpt->db,
+                       lpt->table_name, "");
+  res= lpt->table->file->change_partitions(lpt->create_info, part_path,
+                                           &lpt->copied, &lpt->deleted,
+                                           lpt->pack_frm_data,
+                                           lpt->pack_frm_len);
+  DBUG_RETURN(res);
+}
+
+
+/*
+  Rename partitions in an ALTER TABLE of partitions
+
+  SYNOPSIS
+    mysql_rename_partitions()
+    lpt                        Struct containing parameters
+
+  RETURN VALUES
+    TRUE                          Failure
+    FALSE                         Success
+
+  DESCRIPTION
+    Builds the table file path and requests the handler to rename
+    partitions as set in the states of the partition info.
+
+    Parameters used:
+    db                         Database name
+    table_name                 Table name
+*/
+
+static bool mysql_rename_partitions(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+  char part_path[FN_REFLEN+1];
+  DBUG_ENTER("mysql_rename_partitions");
+
+  build_table_filename(part_path, sizeof(part_path), lpt->db,
+                       lpt->table_name, "");
+  DBUG_RETURN(lpt->table->file->rename_partitions(part_path));
+}
+
+
+/*
+  Drop partitions in an ALTER TABLE of partitions
+
+  SYNOPSIS
+    mysql_drop_partitions()
+    lpt                        Struct containing parameters
+
+  RETURN VALUES
+    TRUE                          Failure
+    FALSE                         Success
+  DESCRIPTION
+    Ask the handler to physically drop the partitions whose state is
+    PART_IS_DROPPED, then unlink those partition elements from the
+    in-memory partition list and adjust the partition count.
+
+    Parameters used:
+    table                       Table object
+    db                          Database name
+    table_name                  Table name
+*/
+
+static bool mysql_drop_partitions(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+  char part_path[FN_REFLEN+1];
+  partition_info *part_info= lpt->table->part_info;
+  List_iterator<partition_element> part_iter(part_info->partitions);
+  uint part_idx= 0;
+  uint dropped_count= 0;
+  DBUG_ENTER("mysql_drop_partitions");
+
+  build_table_filename(part_path, sizeof(part_path), lpt->db,
+                       lpt->table_name, "");
+  /* Physical drop first; on failure leave the in-memory list untouched. */
+  if (lpt->table->file->drop_partitions(part_path))
+    DBUG_RETURN(TRUE);
+  do
+  {
+    partition_element *elem= part_iter++;
+    if (elem->part_state == PART_IS_DROPPED)
+    {
+      /* Unlink the dropped partition from the list. */
+      part_iter.remove();
+      dropped_count++;
+    }
+  } while (++part_idx < part_info->no_parts);
+  part_info->no_parts-= dropped_count;
+  DBUG_RETURN(FALSE);
+}
+
+
+/*
+  Actually perform the change requested by ALTER TABLE of partitions
+  previously prepared.
+
+  SYNOPSIS
+    fast_alter_partition_table()
+    thd                           Thread object
+    table                         Table object
+    alter_info                    ALTER TABLE info
+    create_info                   Create info for CREATE TABLE
+    table_list                    List of the table involved
+    create_list                   The fields in the resulting table
+    key_list                      The keys in the resulting table
+    db                            Database name of new table
+    table_name                    Table name of new table
+
+  RETURN VALUES
+    TRUE                          Error
+    FALSE                         Success
+
+  DESCRIPTION
+    Perform all ALTER TABLE operations for partitioned tables that can be
+    performed fast without a full copy of the original table.
+*/
+
+uint fast_alter_partition_table(THD *thd, TABLE *table,
+                                ALTER_INFO *alter_info,
+                                HA_CREATE_INFO *create_info,
+                                TABLE_LIST *table_list,
+                                List<create_field> *create_list,
+                                List<Key> *key_list, const char *db,
+                                const char *table_name,
+                                uint fast_alter_partition)
+{
+  /* Set-up struct used to write frm files */
+  ulonglong copied= 0;
+  ulonglong deleted= 0;
+  partition_info *part_info= table->part_info;
+  ALTER_PARTITION_PARAM_TYPE lpt_obj;
+  ALTER_PARTITION_PARAM_TYPE *lpt= &lpt_obj;
+  bool written_bin_log= TRUE;
+  DBUG_ENTER("fast_alter_partition_table");
+
+  lpt->thd= thd;
+  lpt->create_info= create_info;
+  lpt->create_list= create_list;
+  lpt->key_list= key_list;
+  lpt->db_options= create_info->table_options;
+  if (create_info->row_type == ROW_TYPE_DYNAMIC)
+    lpt->db_options|= HA_OPTION_PACK_RECORD;
+  lpt->table= table;
+  lpt->key_info_buffer= 0;
+  lpt->key_count= 0;
+  lpt->db= db;
+  lpt->table_name= table_name;
+  lpt->copied= 0;
+  lpt->deleted= 0;
+  lpt->pack_frm_data= NULL;
+  lpt->pack_frm_len= 0;
+  thd->lex->part_info= part_info;
+
+  if (alter_info->flags & ALTER_OPTIMIZE_PARTITION ||
+      alter_info->flags & ALTER_ANALYZE_PARTITION ||
+      alter_info->flags & ALTER_CHECK_PARTITION ||
+      alter_info->flags & ALTER_REPAIR_PARTITION)
+  {
+    /*
+      In this case the user has specified that he wants a set of partitions
+      to be optimised and the partition engine can handle optimising
+      partitions natively without requiring a full rebuild of the
+      partitions.
+
+      In this case it is enough to call optimise_partitions, there is no
+      need to change frm files or anything else.
+    */
+    written_bin_log= FALSE;
+    if (((alter_info->flags & ALTER_OPTIMIZE_PARTITION) &&
+         (table->file->optimize_partitions(thd))) ||
+        ((alter_info->flags & ALTER_ANALYZE_PARTITION) &&
+         (table->file->analyze_partitions(thd))) ||
+        ((alter_info->flags & ALTER_CHECK_PARTITION) &&
+         (table->file->check_partitions(thd))) ||
+        ((alter_info->flags & ALTER_REPAIR_PARTITION) &&
+         (table->file->repair_partitions(thd))))
+    {
+      fast_alter_partition_error_handler(lpt);
+      DBUG_RETURN(TRUE);
+    }
+  }
+  else if (fast_alter_partition & HA_PARTITION_ONE_PHASE)
+  {
+    /*
+      In the case where the engine supports one phase online partition
+      changes it is not necessary to have any exclusive locks. The
+      correctness is upheld instead by transactions being aborted if they
+      access the table after its partition definition has changed (if they
+      are still using the old partition definition).
+
+      The handler is in this case responsible to ensure that all users
+      start using the new frm file after it has changed. To implement
+      one phase it is necessary for the handler to have the master copy
+      of the frm file and use discovery mechanisms to renew it. Thus
+      write frm will write the frm, pack the new frm and finally
+      the frm is deleted and the discovery mechanisms will either restore
+      back to the old or installing the new after the change is activated.
+
+      Thus all open tables will be discovered that they are old, if not
+      earlier as soon as they try an operation using the old table. One
+      should ensure that this is checked already when opening a table,
+      even if it is found in the cache of open tables.
+
+      change_partitions will perform all operations and it is the duty of
+      the handler to ensure that the frm files in the system gets updated
+      in synch with the changes made and if an error occurs that a proper
+      error handling is done.
+
+      If the MySQL Server crashes at this moment but the handler succeeds
+      in performing the change then the binlog is not written for the
+      change. There is no way to solve this as long as the binlog is not
+      transactional and even then it is hard to solve it completely.
+ 
+      The first approach here was to downgrade locks. Now a different approach
+      is decided upon. The idea is that the handler will have access to the
+      ALTER_INFO when store_lock arrives with TL_WRITE_ALLOW_READ. So if the
+      handler knows that this functionality can be handled with a lower lock
+      level it will set the lock level to TL_WRITE_ALLOW_WRITE immediately.
+      Thus the need to downgrade the lock disappears.
+      1) Write the new frm, pack it and then delete it
+      2) Perform the change within the handler
+    */
+    if ((mysql_write_frm(lpt, WFRM_INITIAL_WRITE | WFRM_PACK_FRM)) ||
+        (mysql_change_partitions(lpt)))
+    {
+      fast_alter_partition_error_handler(lpt);
+      DBUG_RETURN(TRUE);
+    }
+  }
+  else if (alter_info->flags == ALTER_DROP_PARTITION)
+  {
+    /*
+      Now after all checks and setting state on dropped partitions we can
+      start the actual dropping of the partitions.
+
+      Drop partition is actually two things happening. The first is that
+      a lot of records are deleted. The second is that the behaviour of
+      subsequent updates and writes and deletes will change. The delete
+      part can be handled without any particular high lock level by
+      transactional engines whereas non-transactional engines need to
+      ensure that this change is done with an exclusive lock on the table.
+      The second part, the change of partitioning does however require
+      an exclusive lock to install the new partitioning as one atomic
+      operation. If this is not the case, it is possible for two
+      transactions to see the change in a different order than their
+      serialisation order. Thus we need an exclusive lock for both
+      transactional and non-transactional engines.
+
+      For LIST partitions it could be possible to avoid the exclusive lock
+      (and for RANGE partitions if they didn't rearrange range definitions
+      after a DROP PARTITION) if one ensured that failed accesses to the
+      dropped partitions was aborted for sure (thus only possible for
+      transactional engines).
+      
+      1) Lock the table in TL_WRITE_ONLY to ensure all other accesses to
+         the table have completed
+      2) Write the new frm file where the partitions have changed but are
+         still remaining with the state PART_TO_BE_DROPPED
+      3) Write the bin log
+      4) Prepare MyISAM handlers for drop of partitions
+      5) Ensure that any users that has opened the table but not yet
+         reached the abort lock do that before downgrading the lock.
+      6) Drop the partitions
+      7) Write the frm file that the partition has been dropped
+      8) Wait until all accesses using the old frm file has completed
+      9) Complete query
+    */
+    if ((abort_and_upgrade_lock(lpt)) ||
+        (mysql_write_frm(lpt, WFRM_INITIAL_WRITE)) ||
+        ((!thd->lex->no_write_to_binlog) &&
+         (write_bin_log(thd, FALSE,
+                       thd->query, thd->query_length), FALSE)) ||
+        (table->file->extra(HA_EXTRA_PREPARE_FOR_DELETE)) ||
+        (close_open_tables_and_downgrade(lpt), FALSE) || 
+        (mysql_drop_partitions(lpt)) ||
+        (mysql_write_frm(lpt, WFRM_CREATE_HANDLER_FILES)) ||
+        (mysql_wait_completed_table(lpt, table), FALSE))
+    {
+      fast_alter_partition_error_handler(lpt);
+      DBUG_RETURN(TRUE);
+    }
+  }
+  else if ((alter_info->flags & ALTER_ADD_PARTITION) &&
+           (part_info->part_type == RANGE_PARTITION ||
+            part_info->part_type == LIST_PARTITION))
+  {
+    /*
+      ADD RANGE/LIST PARTITIONS
+      In this case no tuples are removed and no tuples are added.
+      Thus the operation is merely adding a new partition. It is still
+      necessary to perform the change as an atomic operation. Otherwise
+      someone reading without seeing the new partition could potentially
+      miss updates made by a transaction serialised before it that are
+      inserted into the new partition.
+
+      1) Write the new frm file where state of added partitions is
+         changed to PART_TO_BE_ADDED
+      2) Add the new partitions
+      3) Lock all partitions in TL_WRITE_ONLY to ensure that no users
+         are still using the old partitioning scheme. Wait until all
+         ongoing users have completed before progressing.
+      4) Write a new frm file of the table where the partitions are added
+         to the table.
+      5) Write binlog
+      6) Wait until all accesses using the old frm file have completed
+      7) Complete query
+    */
+    if ((mysql_write_frm(lpt, WFRM_INITIAL_WRITE)) ||
+        (mysql_change_partitions(lpt)) ||
+        (abort_and_upgrade_lock(lpt)) ||
+        (mysql_write_frm(lpt, WFRM_CREATE_HANDLER_FILES)) ||
+        ((!thd->lex->no_write_to_binlog) &&
+         (write_bin_log(thd, FALSE,
+                        thd->query, thd->query_length), FALSE)) ||
+        (close_open_tables_and_downgrade(lpt), FALSE))
+    {
+      fast_alter_partition_error_handler(lpt);
+      DBUG_RETURN(TRUE);
+    }
+  }
+  else
+  {
+    /*
+      ADD HASH PARTITION/
+      COALESCE PARTITION/
+      REBUILD PARTITION/
+      REORGANIZE PARTITION
+ 
+      In this case all records are still around after the change although
+      possibly organised into new partitions, thus by ensuring that all
+      updates go to both the old and the new partitioning scheme we can
+      actually perform this operation lock-free. The only exception to
+      this is when REORGANIZE PARTITION adds/drops ranges. In this case
+      there needs to be an exclusive lock during the time when the range
+      changes occur.
+      This is only possible if the handler can ensure double-write for a
+      period. The double write will ensure that it doesn't matter where the
+      data is read from since both places are updated for writes. If such
+      double writing is not performed then it is necessary to perform the
+      change with the usual exclusive lock. With double writes it is even
+      possible to perform writes in parallel with the reorganisation of
+      partitions.
+
+      Without double write procedure we get the following procedure.
+      The only difference with using double write is that we can downgrade
+      the lock to TL_WRITE_ALLOW_WRITE. Double write in this case only
+      double writes from old to new. If we had double writing in both
+      directions we could perform the change completely without exclusive
+      lock for HASH partitions.
+      Handlers that perform double writing during the copy phase can actually
+      use a lower lock level. This can be handled inside store_lock in the
+      respective handler.
+
+      1) Write the new frm file where state of added partitions is
+         changed to PART_TO_BE_ADDED and the reorganised partitions
+         are set in state PART_TO_BE_REORGED.
+      2) Add the new partitions
+         Copy from the reorganised partitions to the new partitions
+      3) Lock all partitions in TL_WRITE_ONLY to ensure that no users
+         are still using the old partitioning scheme. Wait until all
+         ongoing users have completed before progressing.
+      4) Prepare MyISAM handlers for rename and delete of partitions
+      5) Write a new frm file of the table where the partitions are
+         reorganised.
+      6) Rename the reorged partitions such that they are no longer
+         used and rename those added to their real new names.
+      7) Write bin log
+      8) Wait until all accesses using the old frm file have completed
+      9) Drop the reorganised partitions
+      10)Write a new frm file of the table where the partitions are
+         reorganised.
+      11)Wait until all accesses using the old frm file have completed
+      12)Complete query
+    */
+
+    if ((mysql_write_frm(lpt, WFRM_INITIAL_WRITE)) ||
+        (mysql_change_partitions(lpt)) ||
+        (abort_and_upgrade_lock(lpt)) ||
+        (mysql_write_frm(lpt, WFRM_CREATE_HANDLER_FILES)) ||
+        (table->file->extra(HA_EXTRA_PREPARE_FOR_DELETE)) ||
+        (mysql_rename_partitions(lpt)) ||
+        ((!thd->lex->no_write_to_binlog) &&
+         (write_bin_log(thd, FALSE,
+                        thd->query, thd->query_length), FALSE)) ||
+        (close_open_tables_and_downgrade(lpt), FALSE) ||
+        (mysql_drop_partitions(lpt)) ||
+        (mysql_write_frm(lpt, 0UL)) ||
+        (mysql_wait_completed_table(lpt, table), FALSE))
+    {
+        fast_alter_partition_error_handler(lpt);
+        DBUG_RETURN(TRUE);
+    }
+  }
+  /*
+    A final step is to write the query to the binlog and send ok to the
+    user
+  */
+  DBUG_RETURN(fast_end_partition(thd, lpt->copied, lpt->deleted,
+                                 table_list, FALSE, lpt,
+                                 written_bin_log));
+}
 #endif
 
+
 /*
   Prepare for calling val_int on partition function by setting fields to
   point to the record where the values of the PF-fields are stored.
+
   SYNOPSIS
     set_field_ptr()
     ptr                 Array of fields to change ptr
     new_buf             New record pointer
     old_buf             Old record pointer
+
   DESCRIPTION
     Set ptr in field objects of field array to refer to new_buf record
     instead of previously old_buf. Used before calling val_int and after
@@ -3420,10 +5425,10 @@
 */
 
 void set_field_ptr(Field **ptr, const byte *new_buf,
-                            const byte *old_buf)
+                   const byte *old_buf)
 {
   my_ptrdiff_t diff= (new_buf - old_buf);
-  DBUG_ENTER("set_nullable_field_ptr");
+  DBUG_ENTER("set_field_ptr");
 
   do
   {
@@ -3438,11 +5443,13 @@
   point to the record where the values of the PF-fields are stored.
   This variant works on a key_part reference.
   It is not required that all fields are NOT NULL fields.
+
   SYNOPSIS
     set_key_field_ptr()
-    key_part            key part with a set of fields to change ptr
+    key_info            key info with a set of fields to change ptr
     new_buf             New record pointer
     old_buf             Old record pointer
+
   DESCRIPTION
     Set ptr in field objects of field array to refer to new_buf record
     instead of previously old_buf. Used before calling val_int and after
@@ -3455,7 +5462,8 @@
                        const byte *old_buf)
 {
   KEY_PART_INFO *key_part= key_info->key_part;
-  uint key_parts= key_info->key_parts, i= 0;
+  uint key_parts= key_info->key_parts;
+  uint i= 0;
   my_ptrdiff_t diff= (new_buf - old_buf);
   DBUG_ENTER("set_key_field_ptr");
 
@@ -3465,6 +5473,27 @@
     key_part++;
   } while (++i < key_parts);
   DBUG_VOID_RETURN;
+}
+
+
+/*
+  SYNOPSIS
+    mem_alloc_error()
+    size                Size of memory attempted to allocate
+    None
+
+  RETURN VALUES
+    None
+
+  DESCRIPTION
+    A routine to use in the many places in the code where a memory
+    allocation error can happen; there are a tremendous number of them,
+    so they need one simple routine that signals this error.
+*/
+
+void mem_alloc_error(size_t size)
+{
+  my_error(ER_OUTOFMEMORY, MYF(0), size);
 }
 
 

--- 1.3/mysql-test/r/partition_mgm_err.result	2006-01-06 12:40:17 -06:00
+++ 1.4/mysql-test/r/partition_mgm_err.result	2006-01-18 09:40:19 -06:00
@@ -1,3 +1,4 @@
+drop table if exists t1;
 CREATE TABLE t1 (a int, b int)
 PARTITION BY RANGE (a)
 (PARTITION x0 VALUES LESS THAN (2),
@@ -10,48 +11,52 @@
 PARTITION x7 VALUES LESS THAN (16),
 PARTITION x8 VALUES LESS THAN (18),
 PARTITION x9 VALUES LESS THAN (20));
-ALTER TABLE t1 REORGANISE PARTITION x0,x1 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1 INTO
 (PARTITION x01 VALUES LESS THAN (2),
 PARTITION x11 VALUES LESS THAN (5));
-ERROR HY000: The new partitions cover a bigger range then the reorganised partitions do
+ERROR HY000: Reorganize of range partitions cannot change total ranges except for last partition where it can extend the range
 ALTER TABLE t1 DROP PARTITION x0, x1, x2, x3, x3;
-ERROR HY000: Error in list of partitions to change
+ERROR HY000: Error in list of partitions to DROP
 ALTER TABLE t1 DROP PARTITION x0, x1, x2, x10;
-ERROR HY000: Error in list of partitions to change
+ERROR HY000: Error in list of partitions to DROP
 ALTER TABLE t1 DROP PARTITION x10, x1, x2, x1;
-ERROR HY000: Error in list of partitions to change
+ERROR HY000: Error in list of partitions to DROP
 ALTER TABLE t1 DROP PARTITION x10, x1, x2, x3;
-ERROR HY000: Error in list of partitions to change
-ALTER TABLE t1 REORGANISE PARTITION x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10 INTO
+ERROR HY000: Error in list of partitions to DROP
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10 INTO
 (PARTITION x11 VALUES LESS THAN (22));
 ERROR HY000: More partitions to reorganise than there are partitions
-ALTER TABLE t1 REORGANISE PARTITION x0,x1,x2 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1,x2 INTO
 (PARTITION x3 VALUES LESS THAN (6));
-ERROR HY000: All partitions must have unique names in the table
-ALTER TABLE t1 REORGANISE PARTITION x0, x2 INTO
+ERROR HY000: Duplicate partition name x3
+ALTER TABLE t1 REORGANIZE PARTITION x0, x2 INTO
 (PARTITION x11 VALUES LESS THAN (2));
 ERROR HY000: When reorganising a set of partitions they must be in consecutive order
-ALTER TABLE t1 REORGANISE PARTITION x0, x1, x1 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0, x1, x1 INTO
 (PARTITION x11 VALUES LESS THAN (4));
-ERROR HY000: Error in list of partitions to change
-ALTER TABLE t1 REORGANISE PARTITION x0,x1 INTO
+ERROR HY000: Error in list of partitions to REORGANIZE
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1 INTO
 (PARTITION x01 VALUES LESS THAN (5));
-ERROR HY000: The new partitions cover a bigger range then the reorganised partitions do
-ALTER TABLE t1 REORGANISE PARTITION x0,x1 INTO
+ERROR HY000: Reorganize of range partitions cannot change total ranges except for last partition where it can extend the range
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1 INTO
 (PARTITION x01 VALUES LESS THAN (4),
 PARTITION x11 VALUES LESS THAN (2));
+ERROR HY000: Reorganize of range partitions cannot change total ranges except for last partition where it can extend the range
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1 INTO
+(PARTITION x01 VALUES LESS THAN (6),
+PARTITION x11 VALUES LESS THAN (4));
 ERROR HY000: VALUES LESS THAN value must be strictly increasing for each partition
 DROP TABLE t1;
 CREATE TABLE t1 (a int)
 PARTITION BY KEY (a)
 PARTITIONS 2;
 ALTER TABLE t1 ADD PARTITION (PARTITION p1);
-ERROR HY000: All partitions must have unique names in the table
+ERROR HY000: Duplicate partition name p1
 DROP TABLE t1;
 CREATE TABLE t1 (a int)
 PARTITION BY KEY (a)
 (PARTITION x0, PARTITION x1, PARTITION x2, PARTITION x3, PARTITION x3);
-ERROR HY000: All partitions must have unique names in the table
+ERROR HY000: Duplicate partition name x3
 CREATE TABLE t1 (a int, b int)
 PARTITION BY RANGE(a)
 SUBPARTITION BY HASH(b)
@@ -108,7 +113,7 @@
 ALTER TABLE t1 ADD PARTITION PARTITIONS 1;
 ERROR HY000: For RANGE partitions each partition must be defined
 ALTER TABLE t1 DROP PARTITION x2;
-ERROR HY000: Error in list of partitions to change
+ERROR HY000: Error in list of partitions to DROP
 ALTER TABLE t1 COALESCE PARTITION 1;
 ERROR HY000: COALESCE PARTITION can only be used on HASH/KEY partitions
 ALTER TABLE t1 DROP PARTITION x1;

--- 1.3/mysql-test/t/partition_mgm_err.test	2006-01-06 12:40:19 -06:00
+++ 1.4/mysql-test/t/partition_mgm_err.test	2006-01-18 09:29:45 -06:00
@@ -4,6 +4,10 @@
 #
 -- source include/have_partition.inc
 
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
 #
 # Try faulty DROP PARTITION and COALESCE PARTITION
 #
@@ -21,7 +25,7 @@
  PARTITION x9 VALUES LESS THAN (20));
 
 --error ER_REORG_OUTSIDE_RANGE
-ALTER TABLE t1 REORGANISE PARTITION x0,x1 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1 INTO
 (PARTITION x01 VALUES LESS THAN (2),
  PARTITION x11 VALUES LESS THAN (5));
 
@@ -38,29 +42,34 @@
 ALTER TABLE t1 DROP PARTITION x10, x1, x2, x3;
 
 --error ER_REORG_PARTITION_NOT_EXIST
-ALTER TABLE t1 REORGANISE PARTITION x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10 INTO
 (PARTITION x11 VALUES LESS THAN (22));
 
 --error ER_SAME_NAME_PARTITION
-ALTER TABLE t1 REORGANISE PARTITION x0,x1,x2 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1,x2 INTO
 (PARTITION x3 VALUES LESS THAN (6));
 
 --error ER_CONSECUTIVE_REORG_PARTITIONS
-ALTER TABLE t1 REORGANISE PARTITION x0, x2 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0, x2 INTO
 (PARTITION x11 VALUES LESS THAN (2));
 
 --error ER_DROP_PARTITION_NON_EXISTENT
-ALTER TABLE t1 REORGANISE PARTITION x0, x1, x1 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0, x1, x1 INTO
 (PARTITION x11 VALUES LESS THAN (4));
 
 --error ER_REORG_OUTSIDE_RANGE
-ALTER TABLE t1 REORGANISE PARTITION x0,x1 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1 INTO
 (PARTITION x01 VALUES LESS THAN (5));
 
---error ER_RANGE_NOT_INCREASING_ERROR
-ALTER TABLE t1 REORGANISE PARTITION x0,x1 INTO
+--error ER_REORG_OUTSIDE_RANGE
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1 INTO
 (PARTITION x01 VALUES LESS THAN (4),
  PARTITION x11 VALUES LESS THAN (2));
+
+--error ER_RANGE_NOT_INCREASING_ERROR
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1 INTO
+(PARTITION x01 VALUES LESS THAN (6),
+ PARTITION x11 VALUES LESS THAN (4));
 
 DROP TABLE t1;
 
Thread
bk commit into 5.1 tree (reggie:1.2017)reggie18 Jan