List: Commits
From: Sergey Petrunia
Date: January 18 2006 11:09am
Subject: bk commit into 5.1 tree (sergefp:1.2072)
Below is the list of changes that have just been committed into a local
5.1 repository of psergey. When psergey does a push, these changes will
be propagated to the main repository and, within 24 hours after the
push, to the public repository.
For information on how to access the public repository,
see http://dev.mysql.com/doc/mysql/en/installing-source-tree.html

ChangeSet
  1.2072 06/01/18 14:09:08 sergefp@stripped +9 -0
  Manual merge

  sql/sql_partition.cc
    1.23 06/01/18 14:09:00 sergefp@stripped +0 -0
    Manual merge

  sql/opt_range.cc
    1.196 06/01/18 14:09:00 sergefp@stripped +0 -0
    Manual merge

  mysql-test/t/partition.test
    1.11 06/01/18 14:09:00 sergefp@stripped +0 -1
    Manual merge

  sql/sql_select.cc
    1.382 06/01/18 13:56:19 sergefp@stripped +0 -0
    Auto merged

  sql/sql_lex.h
    1.212 06/01/18 13:56:18 sergefp@stripped +0 -0
    Auto merged

  sql/sql_class.cc
    1.237 06/01/18 13:56:18 sergefp@stripped +0 -0
    Auto merged

  sql/item.h
    1.188 06/01/18 13:56:18 sergefp@stripped +0 -0
    Auto merged

  sql/handler.h
    1.178 06/01/18 13:56:17 sergefp@stripped +0 -0
    Auto merged

  mysql-test/r/partition.result
    1.10 06/01/18 13:56:17 sergefp@stripped +0 -0
    Auto merged

# This is a BitKeeper patch.  What follows are the unified diffs for the
# set of deltas contained in the patch.  The rest of the patch, the part
# that BitKeeper cares about, is below these diffs.
# User:	sergefp
# Host:	newbox.mylan
# Root:	/home/psergey/mysql-5.1-ppruning-r5/RESYNC

--- 1.177/sql/handler.h	2006-01-05 17:46:58 +03:00
+++ 1.178/sql/handler.h	2006-01-18 13:56:17 +03:00
@@ -99,6 +99,7 @@
 #define HA_CAN_PARTITION       (1 << 0) /* Partition support */
 #define HA_CAN_UPDATE_PARTITION_KEY (1 << 1)
 #define HA_CAN_PARTITION_UNIQUE (1 << 2)
+#define HA_USE_AUTO_PARTITION (1 << 3)
 
 
 /* bits in index_flags(index_number) for what you can do with index */
@@ -109,9 +110,58 @@
 #define HA_ONLY_WHOLE_INDEX	16	/* Can't use part key searches */
 #define HA_KEYREAD_ONLY         64	/* Support HA_EXTRA_KEYREAD */
 
-/* bits in alter_table_flags */
-#define HA_ONLINE_ADD_EMPTY_PARTITION 1
-#define HA_ONLINE_DROP_PARTITION 2
+/*
+  bits in alter_table_flags:
+*/
+/*
+  These bits are set if different kinds of indexes can be created
+  off-line without re-creating the table (but with a table lock).
+*/
+#define HA_ONLINE_ADD_INDEX_NO_WRITES           (1L << 0) /*add index w/lock*/
+#define HA_ONLINE_DROP_INDEX_NO_WRITES          (1L << 1) /*drop index w/lock*/
+#define HA_ONLINE_ADD_UNIQUE_INDEX_NO_WRITES    (1L << 2) /*add unique w/lock*/
+#define HA_ONLINE_DROP_UNIQUE_INDEX_NO_WRITES   (1L << 3) /*drop uniq. w/lock*/
+#define HA_ONLINE_ADD_PK_INDEX_NO_WRITES        (1L << 4) /*add prim. w/lock*/
+#define HA_ONLINE_DROP_PK_INDEX_NO_WRITES       (1L << 5) /*drop prim. w/lock*/
+/*
+  These are set if different kinds of indexes can be created on-line
+  (without a table lock). If a handler is capable of one or more of
+  these, it should also set the corresponding *_NO_WRITES bit(s).
+*/
+#define HA_ONLINE_ADD_INDEX                     (1L << 6) /*add index online*/
+#define HA_ONLINE_DROP_INDEX                    (1L << 7) /*drop index online*/
+#define HA_ONLINE_ADD_UNIQUE_INDEX              (1L << 8) /*add unique online*/
+#define HA_ONLINE_DROP_UNIQUE_INDEX             (1L << 9) /*drop uniq. online*/
+#define HA_ONLINE_ADD_PK_INDEX                  (1L << 10)/*add prim. online*/
+#define HA_ONLINE_DROP_PK_INDEX                 (1L << 11)/*drop prim. online*/
+/*
+  HA_PARTITION_FUNCTION_SUPPORTED indicates that the function is
+  supported at all.
+  HA_FAST_CHANGE_PARTITION means that optimised variants of the changes
+  exist but they are not necessarily done online.
+
+  HA_ONLINE_DOUBLE_WRITE means that the handler supports writing to both
+  the new partition and to the old partitions when updating through the
+  old partitioning schema while performing a change of the partitioning.
+  This means that the table can be updated while the copy phase of the
+  change is running. For no lock at all, a double write from the new to
+  the old partitions must also exist, which is not required when only
+  this flag is set.
+  This flag was actually removed before it was ever introduced. The new
+  idea is that handlers set the lock level already in store_lock for
+  ALTER TABLE of partitions.
+
+  HA_PARTITION_ONE_PHASE is a flag that can be set by handlers that take
+  care of changing the partitions online and in one phase. Thus all phases
+  needed to handle the change are implemented inside the storage engine.
+  The storage engine must also support auto-discovery since the frm file
+  is changed as part of the change and this change must be controlled by
+  the storage engine. A typical engine to support this is NDB (through
+  WL #2498).
+*/
+#define HA_PARTITION_FUNCTION_SUPPORTED         (1L << 12)
+#define HA_FAST_CHANGE_PARTITION                (1L << 13)
+#define HA_PARTITION_ONE_PHASE                  (1L << 14)
 
 /*
   Index scan will not return records in rowid order. Not guaranteed to be
@@ -119,7 +169,6 @@
 */
 #define HA_KEY_SCAN_NOT_ROR     128 
 
-
 /* operations for disable/enable indexes */
 #define HA_KEY_SWITCH_NONUNIQ      0
 #define HA_KEY_SWITCH_ALL          1
@@ -135,16 +184,6 @@
 #define MAX_HA 15
 
 /*
-  Bits in index_ddl_flags(KEY *wanted_index)
-  for what ddl you can do with index
-  If none is set, the wanted type of index is not supported
-  by the handler at all. See WorkLog 1563.
-*/
-#define HA_DDL_SUPPORT   1 /* Supported by handler */
-#define HA_DDL_WITH_LOCK 2 /* Can create/drop with locked table */
-#define HA_DDL_ONLINE    4 /* Can create/drop without lock */
-
-/*
   Parameters for open() (in register form->filestat)
   HA_GET_INFO does an implicit HA_ABORT_IF_LOCKED
 */
@@ -205,6 +244,24 @@
 		ROW_TYPE_DYNAMIC, ROW_TYPE_COMPRESSED,
 		ROW_TYPE_REDUNDANT, ROW_TYPE_COMPACT };
 
+enum enum_binlog_func {
+  BFN_RESET_LOGS=        1,
+  BFN_RESET_SLAVE=       2,
+  BFN_BINLOG_WAIT=       3,
+  BFN_BINLOG_END=        4,
+  BFN_BINLOG_PURGE_FILE= 5
+};
+
+enum enum_binlog_command {
+  LOGCOM_CREATE_TABLE,
+  LOGCOM_ALTER_TABLE,
+  LOGCOM_RENAME_TABLE,
+  LOGCOM_DROP_TABLE,
+  LOGCOM_CREATE_DB,
+  LOGCOM_ALTER_DB,
+  LOGCOM_DROP_DB
+};
+
 /* struct to hold information about the table that should be created */
 
 /* Bits in used_fields */
@@ -315,6 +372,82 @@
 #define MAX_XID_LIST_SIZE  (1024*128)
 #endif
 
+/*
+  These structures are used to pass information from a set of SQL commands
+  on add/drop/change tablespace definitions to the proper hton.
+*/
+#define UNDEF_NODEGROUP 65535
+enum ts_command_type
+{
+  TS_CMD_NOT_DEFINED = -1,
+  CREATE_TABLESPACE = 0,
+  ALTER_TABLESPACE = 1,
+  CREATE_LOGFILE_GROUP = 2,
+  ALTER_LOGFILE_GROUP = 3,
+  DROP_TABLESPACE = 4,
+  DROP_LOGFILE_GROUP = 5,
+  CHANGE_FILE_TABLESPACE = 6,
+  ALTER_ACCESS_MODE_TABLESPACE = 7
+};
+
+enum ts_alter_tablespace_type
+{
+  TS_ALTER_TABLESPACE_TYPE_NOT_DEFINED = -1,
+  ALTER_TABLESPACE_ADD_FILE = 1,
+  ALTER_TABLESPACE_DROP_FILE = 2
+};
+
+enum tablespace_access_mode
+{
+  TS_NOT_DEFINED= -1,
+  TS_READ_ONLY = 0,
+  TS_READ_WRITE = 1,
+  TS_NOT_ACCESSIBLE = 2
+};
+
+class st_alter_tablespace : public Sql_alloc
+{
+  public:
+  const char *tablespace_name;
+  const char *logfile_group_name;
+  enum ts_command_type ts_cmd_type;
+  enum ts_alter_tablespace_type ts_alter_tablespace_type;
+  const char *data_file_name;
+  const char *undo_file_name;
+  const char *redo_file_name;
+  ulonglong extent_size;
+  ulonglong undo_buffer_size;
+  ulonglong redo_buffer_size;
+  ulonglong initial_size;
+  ulonglong autoextend_size;
+  ulonglong max_size;
+  uint nodegroup_id;
+  enum legacy_db_type storage_engine;
+  bool wait_until_completed;
+  const char *ts_comment;
+  enum tablespace_access_mode ts_access_mode;
+  st_alter_tablespace()
+  {
+    tablespace_name= NULL;
+    logfile_group_name= "DEFAULT_LG"; //Default log file group
+    ts_cmd_type= TS_CMD_NOT_DEFINED;
+    data_file_name= NULL;
+    undo_file_name= NULL;
+    redo_file_name= NULL;
+    extent_size= 1024*1024;        //Default 1 MByte
+    undo_buffer_size= 8*1024*1024; //Default 8 MByte
+    redo_buffer_size= 8*1024*1024; //Default 8 MByte
+    initial_size= 128*1024*1024;   //Default 128 MByte
+    autoextend_size= 0;            //No autoextension as default
+    max_size= 0;                   //Max size == initial size => no extension
+    storage_engine= DB_TYPE_UNKNOWN;
+    nodegroup_id= UNDEF_NODEGROUP;
+    wait_until_completed= TRUE;
+    ts_comment= NULL;
+    ts_access_mode= TS_NOT_DEFINED;
+  }
+};
+
 /* The handler for a table type.  Will be included in the TABLE structure */
 
 struct st_table;
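
The st_alter_tablespace block above is the carrier between the new tablespace
DDL statements and the storage engine: the parser fills in the command type
and whatever clauses were given, the constructor supplies the documented
defaults (1 MByte extents, 8 MByte buffers, 128 MByte initial size, and so
on), and the object is handed to the engine through the alter_tablespace
handlerton hook added further down. A self-contained sketch of that flow,
using simplified stand-ins rather than the real server classes:

  #include <cstdio>

  /* Simplified stand-ins mirroring the hunk above; not the server types. */
  enum ts_command_type_demo { TS_CMD_NOT_DEFINED= -1, CREATE_TABLESPACE= 0 };

  struct st_alter_tablespace_demo
  {
    const char *tablespace_name;
    const char *data_file_name;
    ts_command_type_demo ts_cmd_type;
    unsigned long long extent_size;
    unsigned long long initial_size;
    bool wait_until_completed;

    st_alter_tablespace_demo()
      : tablespace_name(0), data_file_name(0),
        ts_cmd_type(TS_CMD_NOT_DEFINED),
        extent_size(1024*1024),        /* default 1 MByte, as in the hunk */
        initial_size(128*1024*1024),   /* default 128 MByte */
        wait_until_completed(true)
    {}
  };

  /* Stand-in for the engine hook installed in handlerton::alter_tablespace. */
  static int demo_alter_tablespace(st_alter_tablespace_demo *ts_info)
  {
    if (ts_info->ts_cmd_type == CREATE_TABLESPACE)
      std::printf("create tablespace %s on %s (%llu bytes initially)\n",
                  ts_info->tablespace_name, ts_info->data_file_name,
                  ts_info->initial_size);
    return 0;
  }

  int main()
  {
    /* Roughly what the parser would fill in for a CREATE TABLESPACE
       statement with an explicit INITIAL_SIZE clause. */
    st_alter_tablespace_demo ts_info;
    ts_info.ts_cmd_type= CREATE_TABLESPACE;
    ts_info.tablespace_name= "ts1";
    ts_info.data_file_name= "ts1.dat";
    ts_info.initial_size= 64ULL*1024*1024;
    return demo_alter_tablespace(&ts_info);
  }
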
@@ -344,7 +477,8 @@
     handlerton structure version
    */
   const int interface_version;
-#define MYSQL_HANDLERTON_INTERFACE_VERSION 0x0000
+/* last version change: 0x0001 in 5.1.6 */
+#define MYSQL_HANDLERTON_INTERFACE_VERSION 0x0001
 
 
   /*
@@ -434,7 +568,19 @@
    int (*start_consistent_snapshot)(THD *thd);
    bool (*flush_logs)();
    bool (*show_status)(THD *thd, stat_print_fn *print, enum ha_stat_type stat);
+   uint (*partition_flags)();
+   uint (*alter_table_flags)(uint flags);
+   int (*alter_tablespace)(THD *thd, st_alter_tablespace *ts_info);
    uint32 flags;                                /* global handler flags */
+   /* 
+      Handlerton functions are not set in the different storage
+      engines' static initialization.  They are initialized at handler init.
+      Thus, leave them last in the struct.
+   */
+   int (*binlog_func)(THD *thd, enum_binlog_func fn, void *arg);
+   void (*binlog_log_query)(THD *thd, enum_binlog_command binlog_command,
+                            const char *query, uint query_length,
+                            const char *db, const char *table_name);
 } handlerton;
 
 extern const handlerton default_hton;
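
This hunk grows the handlerton with three capability callbacks
(partition_flags, alter_table_flags, alter_tablespace) plus the binlog hooks
that, per the comment, are filled in at handler init rather than by the
engines' static initializers and therefore stay at the end of the struct.
A rough sketch of how a hypothetical engine could advertise the bits from
the alter_table_flags hunk earlier in this patch (flag values copied from
the patch; the cut-down handlerton and the engine itself are made up):

  /* Flag values copied from the alter_table_flags hunk above. */
  #define HA_ONLINE_ADD_INDEX_NO_WRITES  (1L << 0)
  #define HA_ONLINE_DROP_INDEX_NO_WRITES (1L << 1)
  #define HA_ONLINE_ADD_INDEX            (1L << 6)
  #define HA_ONLINE_DROP_INDEX           (1L << 7)
  #define HA_FAST_CHANGE_PARTITION       (1L << 13)
  #define HA_CAN_PARTITION               (1 << 0)
  #define HA_USE_AUTO_PARTITION          (1 << 3)

  /* Cut-down stand-in for the new handlerton members. */
  struct demo_handlerton
  {
    unsigned int (*partition_flags)();
    unsigned int (*alter_table_flags)(unsigned int flags);
  };

  /* A hypothetical engine: native partitioning plus online ADD/DROP INDEX.
     As the earlier comment requires, an engine setting an online bit also
     sets the matching *_NO_WRITES bit. */
  static unsigned int demo_partition_flags()
  { return HA_CAN_PARTITION | HA_USE_AUTO_PARTITION; }

  static unsigned int demo_alter_table_flags(unsigned int)
  {
    return HA_ONLINE_ADD_INDEX  | HA_ONLINE_ADD_INDEX_NO_WRITES |
           HA_ONLINE_DROP_INDEX | HA_ONLINE_DROP_INDEX_NO_WRITES |
           HA_FAST_CHANGE_PARTITION;
  }

  static const demo_handlerton demo_hton=
  { demo_partition_flags, demo_alter_table_flags };

  /* Server-side check: can this engine add an index without a table lock? */
  static bool can_add_index_online(const demo_handlerton *hton)
  {
    return (hton->alter_table_flags(0) & HA_ONLINE_ADD_INDEX) != 0;
  }

  int main() { return can_add_index_online(&demo_hton) ? 0 : 1; }
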
@@ -490,12 +636,27 @@
   PART_NORMAL= 0,
   PART_IS_DROPPED= 1,
   PART_TO_BE_DROPPED= 2,
-  PART_DROPPING= 3,
-  PART_IS_ADDED= 4,
-  PART_ADDING= 5,
-  PART_ADDED= 6
+  PART_TO_BE_ADDED= 3,
+  PART_TO_BE_REORGED= 4,
+  PART_REORGED_DROPPED= 5,
+  PART_CHANGED= 6,
+  PART_IS_CHANGED= 7,
+  PART_IS_ADDED= 8
 };
 
+typedef struct {
+  ulonglong data_file_length;
+  ulonglong max_data_file_length;
+  ulonglong index_file_length;
+  ulonglong delete_length;
+  ha_rows records;
+  ulong mean_rec_length;
+  time_t create_time;
+  time_t check_time;
+  time_t update_time;
+  ulonglong check_sum;
+} PARTITION_INFO;
+
 #define UNDEF_NODEGROUP 65535
 class Item;
 
@@ -530,13 +691,14 @@
 
 typedef struct {
   longlong list_value;
-  uint partition_id;
+  uint32 partition_id;
 } LIST_PART_ENTRY;
 
 class partition_info;
 
-typedef bool (*get_part_id_func)(partition_info *part_info,
-                                 uint32 *part_id);
+typedef int (*get_part_id_func)(partition_info *part_info,
+                                 uint32 *part_id,
+                                 longlong *func_value);
 typedef uint32 (*get_subpart_id_func)(partition_info *part_info);
 
 
@@ -739,6 +901,8 @@
   char *part_func_string;
   char *subpart_func_string;
 
+  uchar *part_state;
+
   partition_element *curr_part_elem;
   partition_element *current_partition;
   /*
@@ -755,12 +919,12 @@
   partition_type subpart_type;
 
   uint part_info_len;
+  uint part_state_len;
   uint part_func_len;
   uint subpart_func_len;
 
   uint no_parts;
   uint no_subparts;
-  uint count_curr_parts;
   uint count_curr_subparts;
 
   uint part_error_code;
@@ -771,14 +935,24 @@
   uint no_subpart_fields;
   uint no_full_part_fields;
 
+  /*
+    This variable is used to calculate the partition id when using
+    LINEAR KEY/HASH. This functionality is kept in the MySQL Server
+    but is mainly of use to handlers supporting partitioning.
+  */
   uint16 linear_hash_mask;
 
   bool use_default_partitions;
+  bool use_default_no_partitions;
   bool use_default_subpartitions;
+  bool use_default_no_subpartitions;
+  bool default_partitions_setup;
   bool defined_max_value;
   bool list_of_part_fields;
   bool list_of_subpart_fields;
   bool linear_hash_ind;
+  bool fixed;
+  bool from_openfrm;
 
   partition_info()
   : get_partition_id(NULL), get_part_partition_id(NULL),
@@ -789,19 +963,27 @@
     list_array(NULL),
     part_info_string(NULL),
     part_func_string(NULL), subpart_func_string(NULL),
+    part_state(NULL),
     curr_part_elem(NULL), current_partition(NULL),
     default_engine_type(NULL),
     part_result_type(INT_RESULT),
     part_type(NOT_A_PARTITION), subpart_type(NOT_A_PARTITION),
-    part_info_len(0), part_func_len(0), subpart_func_len(0),
+    part_info_len(0), part_state_len(0),
+    part_func_len(0), subpart_func_len(0),
     no_parts(0), no_subparts(0),
-    count_curr_parts(0), count_curr_subparts(0), part_error_code(0),
+    count_curr_subparts(0), part_error_code(0),
     no_list_values(0), no_part_fields(0), no_subpart_fields(0),
     no_full_part_fields(0), linear_hash_mask(0),
     use_default_partitions(TRUE),
-    use_default_subpartitions(TRUE), defined_max_value(FALSE),
+    use_default_no_partitions(TRUE),
+    use_default_subpartitions(TRUE),
+    use_default_no_subpartitions(TRUE),
+    default_partitions_setup(FALSE),
+    defined_max_value(FALSE),
     list_of_part_fields(FALSE), list_of_subpart_fields(FALSE),
-    linear_hash_ind(FALSE)
+    linear_hash_ind(FALSE),
+    fixed(FALSE),
+    from_openfrm(FALSE)
   {
     all_fields_in_PF.clear_all();
     all_fields_in_PPF.clear_all();
@@ -868,13 +1050,15 @@
   return part_info->no_parts *
          (is_sub_partitioned(part_info) ? part_info->no_subparts : 1);
 }
+
+
 #endif
 
 typedef struct st_ha_create_information
 {
   CHARSET_INFO *table_charset, *default_table_charset;
   LEX_STRING connect_string;
-  const char *comment,*password;
+  const char *comment,*password, *tablespace;
   const char *data_file_name, *index_file_name;
   const char *alias;
   ulonglong max_rows,min_rows;
@@ -894,6 +1078,7 @@
   bool table_existed;			/* 1 in create if table existed */
   bool frm_only;                        /* 1 if no ha_create_table() */
   bool varchar;                         /* 1 if table has a VARCHAR */
+  bool store_on_disk;                   /* 1 if table stored on disk */
 } HA_CREATE_INFO;
 
 
@@ -916,8 +1101,8 @@
 
 #ifdef WITH_PARTITION_STORAGE_ENGINE
 bool is_partition_in_list(char *part_name, List<char> list_part_names);
-bool is_partitions_in_table(partition_info *new_part_info,
-                            partition_info *old_part_info);
+char *are_partitions_in_table(partition_info *new_part_info,
+                              partition_info *old_part_info);
 bool check_reorganise_list(partition_info *new_part_info,
                            partition_info *old_part_info,
                            List<char> list_part_names);
@@ -928,15 +1113,17 @@
 handler *get_ha_partition(partition_info *part_info);
 int get_parts_for_update(const byte *old_data, byte *new_data,
                          const byte *rec0, partition_info *part_info,
-                         uint32 *old_part_id, uint32 *new_part_id);
+                         uint32 *old_part_id, uint32 *new_part_id,
+                         longlong *func_value);
 int get_part_for_delete(const byte *buf, const byte *rec0,
                         partition_info *part_info, uint32 *part_id);
-bool check_partition_info(partition_info *part_info,handlerton *eng_type,
+bool check_partition_info(partition_info *part_info,handlerton **eng_type,
                           handler *file, ulonglong max_rows);
-bool fix_partition_func(THD *thd, const char *name, TABLE *table);
+bool fix_partition_func(THD *thd, const char *name, TABLE *table,
+                        bool create_table_ind);
 char *generate_partition_syntax(partition_info *part_info,
                                 uint *buf_length, bool use_sql_alloc,
-                                bool add_default_info);
+                                bool write_all);
 bool partition_key_modified(TABLE *table, List<Item> &fields);
 void get_partition_set(const TABLE *table, byte *buf, const uint index,
                        const key_range *key_spec,
@@ -946,7 +1133,9 @@
                                const key_range *key_spec,
                                part_id_range *part_spec);
 bool mysql_unpack_partition(THD *thd, const uchar *part_buf,
-                            uint part_info_len, TABLE *table,
+                            uint part_info_len,
+                            uchar *part_state, uint part_state_len,
+                            TABLE *table, bool is_create_table_ind,
                             handlerton *default_db_type);
 void make_used_partitions_str(partition_info *part_info, String *parts_str);
 uint32 get_list_array_idx_for_endpoint(partition_info *part_info,
@@ -972,7 +1161,6 @@
   byte *end_of_used_area;     /* End of area that was used by handler */
 } HANDLER_BUFFER;
 
-
 class handler :public Sql_alloc
 {
 #ifdef WITH_PARTITION_STORAGE_ENGINE
@@ -1260,6 +1448,12 @@
   virtual int ha_update_row(const byte * old_data, byte * new_data);
   virtual int ha_delete_row(const byte * buf);
   /*
+    If the handler does its own injection of the rows, this member function
+    should return 'true'.
+  */
+  virtual bool is_injective() const { return false; }
+  
+  /*
     SYNOPSIS
       start_bulk_update()
     RETURN
@@ -1384,6 +1578,8 @@
     { return (ha_rows) 10; }
   virtual void position(const byte *record)=0;
   virtual void info(uint)=0; // see my_base.h for full description
+  virtual void get_dynamic_partition_info(PARTITION_INFO *stat_info,
+                                          uint part_id);
   virtual int extra(enum ha_extra_function operation)
   { return 0; }
   virtual int extra_opt(enum ha_extra_function operation, ulong cache_size)
@@ -1498,18 +1694,25 @@
   virtual const char *table_type() const =0;
   virtual const char **bas_ext() const =0;
   virtual ulong table_flags(void) const =0;
-  virtual ulong alter_table_flags(void) const { return 0; }
 #ifdef WITH_PARTITION_STORAGE_ENGINE
-  virtual ulong partition_flags(void) const { return 0;}
   virtual int get_default_no_partitions(ulonglong max_rows) { return 1;}
-  virtual void set_part_info(partition_info *part_info) { return; }
+  virtual void set_auto_partitions(partition_info *part_info) { return; }
+  virtual bool get_no_parts(const char *name,
+                            uint *no_parts)
+  {
+    *no_parts= 0;
+    return 0;
+  }
+  virtual void set_part_info(partition_info *part_info) {return;}
 #endif
   virtual ulong index_flags(uint idx, uint part, bool all_parts) const =0;
-  virtual ulong index_ddl_flags(KEY *wanted_index) const
-  { return (HA_DDL_SUPPORT); }
+
   virtual int add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys)
   { return (HA_ERR_WRONG_COMMAND); }
-  virtual int drop_index(TABLE *table_arg, uint *key_num, uint num_of_keys)
+  virtual int prepare_drop_index(TABLE *table_arg, uint *key_num,
+                                 uint num_of_keys)
+  { return (HA_ERR_WRONG_COMMAND); }
+  virtual int final_drop_index(TABLE *table_arg)
   { return (HA_ERR_WRONG_COMMAND); }
 
   uint max_record_length() const
@@ -1546,19 +1749,26 @@
   virtual int create(const char *name, TABLE *form, HA_CREATE_INFO *info)=0;
   virtual int create_handler_files(const char *name) { return FALSE;}
 
-  /*
-    SYNOPSIS
-      drop_partitions()
-      path                        Complete path of db and table name
-    RETURN VALUE
-      TRUE                        Failure
-      FALSE                       Success
-    DESCRIPTION
-      Drop a partition, during this operation no other activity is ongoing
-      in this server on the table.
-  */
+  virtual int change_partitions(HA_CREATE_INFO *create_info,
+                                const char *path,
+                                ulonglong *copied,
+                                ulonglong *deleted,
+                                const void *pack_frm_data,
+                                uint pack_frm_len)
+  { return HA_ERR_WRONG_COMMAND; }
   virtual int drop_partitions(const char *path)
   { return HA_ERR_WRONG_COMMAND; }
+  virtual int rename_partitions(const char *path)
+  { return HA_ERR_WRONG_COMMAND; }
+  virtual int optimize_partitions(THD *thd)
+  { return HA_ERR_WRONG_COMMAND; }
+  virtual int analyze_partitions(THD *thd)
+  { return HA_ERR_WRONG_COMMAND; }
+  virtual int check_partitions(THD *thd)
+  { return HA_ERR_WRONG_COMMAND; }
+  virtual int repair_partitions(THD *thd)
+  { return HA_ERR_WRONG_COMMAND; }
+
   /* lock_count() can be more than one if the table is a MERGE */
   virtual uint lock_count(void) const { return 1; }
   virtual THR_LOCK_DATA **store_lock(THD *thd,
@@ -1693,7 +1903,7 @@
 
 inline bool ha_storage_engine_is_enabled(const handlerton *db_type)
 {
-  return (db_type && db_type->create) ? 
+  return (db_type && db_type->create) ?
          (db_type->state == SHOW_OPTION_YES) : FALSE;
 }
 
@@ -1704,7 +1914,6 @@
 
 TYPELIB *ha_known_exts(void);
 int ha_panic(enum ha_panic_function flag);
-int ha_update_statistics();
 void ha_close_connection(THD* thd);
 bool ha_flush_logs(handlerton *db_type);
 void ha_drop_database(char* path);
@@ -1769,3 +1978,21 @@
 int ha_repl_report_sent_binlog(THD *thd, char *log_file_name,
                                my_off_t end_offset);
 int ha_repl_report_replication_stop(THD *thd);
+
+#ifdef HAVE_NDB_BINLOG
+int ha_reset_logs(THD *thd);
+int ha_binlog_index_purge_file(THD *thd, const char *file);
+void ha_reset_slave(THD *thd);
+void ha_binlog_log_query(THD *thd, enum_binlog_command binlog_command,
+                         const char *query, uint query_length,
+                         const char *db, const char *table_name);
+void ha_binlog_wait(THD *thd);
+int ha_binlog_end(THD *thd);
+#else
+#define ha_reset_logs(a) 0
+#define ha_binlog_index_purge_file(a,b) 0
+#define ha_reset_slave(a)
+#define ha_binlog_log_query(a,b,c,d,e,f);
+#define ha_binlog_wait(a)
+#define ha_binlog_end(a) 0
+#endif

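Two of the handler API changes above follow the same convention: index
dropping is split into prepare_drop_index()/final_drop_index(), and the new
partition maintenance entry points (change_partitions(), rename_partitions(),
optimize/analyze/check/repair_partitions()) all default to
HA_ERR_WRONG_COMMAND so the server can fall back to its generic copying path
for engines that do not override them. A toy illustration of that convention
(stand-in classes and simplified signatures, not the real handler hierarchy):

  #include <cstdio>

  static const int HA_ERR_WRONG_COMMAND= 131;  /* illustrative value only */

  /* Stand-in handler base: optional operations default to "not supported",
     exactly like the new virtuals in the hunks above. */
  struct demo_handler
  {
    virtual ~demo_handler() {}
    virtual int prepare_drop_index(unsigned *keys, unsigned n_keys)
    { return HA_ERR_WRONG_COMMAND; }
    virtual int final_drop_index()
    { return HA_ERR_WRONG_COMMAND; }
  };

  /* A hypothetical engine that implements the two-phase drop. */
  struct demo_fast_engine : demo_handler
  {
    int prepare_drop_index(unsigned *keys, unsigned n_keys)
    {
      std::printf("marking %u index(es) unusable\n", n_keys);
      return 0;
    }
    int final_drop_index()
    {
      std::printf("reclaiming index space\n");
      return 0;
    }
  };

  /* Server-side caller: take the fast path when the engine offers it,
     otherwise fall back to the generic ALTER TABLE copy algorithm. */
  static void drop_index(demo_handler *file, unsigned *keys, unsigned n)
  {
    if (file->prepare_drop_index(keys, n) == HA_ERR_WRONG_COMMAND)
    {
      std::printf("no fast drop in this engine; rebuild the table instead\n");
      return;
    }
    /* ... wait until no statement uses the old index any more ... */
    file->final_drop_index();
  }

  int main()
  {
    unsigned keys[]= { 1 };
    demo_fast_engine fast;
    demo_handler plain;
    drop_index(&fast, keys, 1);
    drop_index(&plain, keys, 1);
    return 0;
  }
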
--- 1.187/sql/item.h	2005-12-27 15:04:27 +03:00
+++ 1.188/sql/item.h	2006-01-18 13:56:18 +03:00
@@ -1789,11 +1789,7 @@
   void make_field(Send_field *field);
   bool fix_fields(THD *, Item **);
   int save_in_field(Field *field, bool no_conversions);
-  void save_org_in_field(Field *field)
-  {
-    (*ref)->save_org_in_field(field);
-    null_value= (*ref)->null_value;
-  }
+  void save_org_in_field(Field *field);
   enum Item_result result_type () const { return (*ref)->result_type(); }
   enum_field_types field_type() const   { return (*ref)->field_type(); }
   Field *get_tmp_table_field()

--- 1.195/sql/opt_range.cc	2006-01-05 17:46:58 +03:00
+++ 1.196/sql/opt_range.cc	2006-01-18 14:09:00 +03:00
@@ -2773,8 +2773,10 @@
         DBUG_EXECUTE("info", dbug_print_singlepoint_range(ppar->arg_stack,
                                                        ppar->part_fields););
         uint32 part_id;
+        longlong func_value;
         /* Find in which partition the {const1, ...,constN} tuple goes */
-        if (ppar->get_top_partition_id_func(ppar->part_info, &part_id))
+        if (ppar->get_top_partition_id_func(ppar->part_info, &part_id,
+                                            &func_value))
         {
           res= 0; /* No satisfying partitions */
           goto pop_and_go_right;
@@ -7005,6 +7007,7 @@
 
 /*
   Create quick select from ref/ref_or_null scan.
+
   SYNOPSIS
     get_quick_select_for_ref()
       thd      Thread handle
@@ -7024,15 +7027,18 @@
 QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table,
                                              TABLE_REF *ref, ha_rows records)
 {
-  MEM_ROOT *old_root= thd->mem_root;
-  /* The following call may change thd->mem_root */
-  QUICK_RANGE_SELECT *quick= new QUICK_RANGE_SELECT(thd, table, ref->key, 0);
-  /* save mem_root set by QUICK_RANGE_SELECT constructor */
-  MEM_ROOT *alloc= thd->mem_root;
+  MEM_ROOT *old_root, *alloc;
+  QUICK_RANGE_SELECT *quick;
   KEY *key_info = &table->key_info[ref->key];
   KEY_PART *key_part;
   QUICK_RANGE *range;
   uint part;
+
+  old_root= thd->mem_root;
+  /* The following call may change thd->mem_root */
+  quick= new QUICK_RANGE_SELECT(thd, table, ref->key, 0);
+  /* save mem_root set by QUICK_RANGE_SELECT constructor */
+  alloc= thd->mem_root;
   /*
     return back default mem_root (thd->mem_root) changed by
     QUICK_RANGE_SELECT constructor

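Most of the get_quick_select_for_ref() hunk just moves declarations ahead of
statements, but the comments it keeps describe a pattern worth spelling out:
the QUICK_RANGE_SELECT constructor switches thd->mem_root to its own
allocator, so the caller saves the old root first, uses the constructor's
root for the ranges it builds, and restores the default root afterwards. A
minimal stand-alone sketch of that save/switch/restore discipline (the types
are simplified stand-ins, not the server's THD or MEM_ROOT):

  #include <cassert>

  struct mem_root_demo { const char *name; };     /* stand-in for MEM_ROOT */
  struct session_demo  { mem_root_demo *mem_root; };  /* stand-in for THD */

  /* Stand-in for QUICK_RANGE_SELECT: its constructor points the session at
     its own allocator, like the real constructor changes thd->mem_root. */
  struct quick_select_demo
  {
    mem_root_demo own_root;
    explicit quick_select_demo(session_demo *s)
    {
      own_root.name= "quick";
      s->mem_root= &own_root;
    }
  };

  quick_select_demo *make_quick(session_demo *s)
  {
    mem_root_demo *old_root= s->mem_root;          /* save caller's root */
    quick_select_demo *quick= new quick_select_demo(s); /* changes mem_root */
    mem_root_demo *alloc= s->mem_root;             /* root set by the ctor */
    assert(alloc == &quick->own_root);
    /* ... allocate key ranges on 'alloc' here ... */
    s->mem_root= old_root;                         /* restore default root */
    return quick;
  }

  int main()
  {
    mem_root_demo def= { "default" };
    session_demo s= { &def };
    quick_select_demo *q= make_quick(&s);
    assert(s.mem_root == &def);
    delete q;
    return 0;
  }
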
--- 1.236/sql/sql_class.cc	2006-01-17 15:32:22 +03:00
+++ 1.237/sql/sql_class.cc	2006-01-18 13:56:18 +03:00
@@ -763,7 +763,10 @@
 #ifdef WITH_PARTITION_STORAGE_ENGINE
   if (lex->describe & DESCRIBE_PARTITIONS)
   {
-    field_list.push_back(item= new Item_empty_string("partitions", 10, cs));
+    /* Maximum length of string that make_used_partitions_str() can produce */
+    item= new Item_empty_string("partitions", MAX_PARTITIONS * (1 + FN_LEN),
+                                cs);
+    field_list.push_back(item);
     item->maybe_null= 1;
   }
 #endif

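The new column width is simply the worst case for make_used_partitions_str():
every one of up to MAX_PARTITIONS partitions is listed, each contributing a
name of at most FN_LEN characters plus one separator character, hence
MAX_PARTITIONS * (1 + FN_LEN). A quick sanity check of that bound (the
constants below are small placeholders, not the server's real values):

  #include <cassert>
  #include <string>
  #include <vector>

  int main()
  {
    /* Placeholder values; the real MAX_PARTITIONS and FN_LEN come from the
       server headers and are much larger. */
    const unsigned MAX_PARTITIONS= 4;
    const unsigned FN_LEN= 8;

    /* Longest possible "partitions" column value: every partition used,
       every partition name at the length limit. */
    std::vector<std::string> names(MAX_PARTITIONS, std::string(FN_LEN, 'p'));
    std::string out;
    for (unsigned i= 0; i < names.size(); i++)
    {
      if (i)
        out+= ',';                      /* one separator per extra name */
      out+= names[i];
    }
    /* MAX_PARTITIONS*FN_LEN characters plus MAX_PARTITIONS-1 separators
       never exceeds MAX_PARTITIONS * (1 + FN_LEN). */
    assert(out.size() <= MAX_PARTITIONS * (1 + FN_LEN));
    return 0;
  }
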
--- 1.211/sql/sql_lex.h	2006-01-17 10:37:27 +03:00
+++ 1.212/sql/sql_lex.h	2006-01-18 13:56:18 +03:00
@@ -110,7 +110,7 @@
 #define DESCRIBE_NORMAL		1
 #define DESCRIBE_EXTENDED	2
 /*
-  This is not #ifdef'ed because we want "EXPLAIN PARTITIONS ..." to produce
+  This is not within #ifdef because we want "EXPLAIN PARTITIONS ..." to produce
   additional "partitions" column even if partitioning is not compiled in.
 */
 #define DESCRIBE_PARTITIONS	4

--- 1.381/sql/sql_select.cc	2006-01-04 11:08:49 +03:00
+++ 1.382/sql/sql_select.cc	2006-01-18 13:56:19 +03:00
@@ -365,7 +365,8 @@
     if (having_fix_rc || thd->net.report_error)
       DBUG_RETURN(-1);				/* purecov: inspected */
     if (having->with_sum_func)
-      having->split_sum_func(thd, ref_pointer_array, all_fields);
+      having->split_sum_func2(thd, ref_pointer_array, all_fields,
+                              &having, TRUE);
     thd->lex->allow_sum_func= save_allow_sum_func;
   }
   if (select_lex->inner_sum_func_list)
@@ -3512,13 +3513,32 @@
     parts of the row from any of the used index.
     This is because table scans uses index and we would not win
     anything by using a table scan.
+
+    A word-for-word translation of the if-statement below, in psergey's
+    understanding: we should consider a table scan if:
+    (1) The found 'ref' access produces more records than a table scan
+        (or index scan, or quick select), or 'ref' is more expensive than
+        any of them.
+    (2) This doesn't hold: the best way to perform a table scan is to perform
+        'range' access using index IDX, and the best way to perform 'ref' 
+        access is to use the same index IDX, with the same or more key parts.
+        (note: it is not clear how this rule is/should be extended to 
+        index_merge quick selects)
+    (3) See above note about InnoDB.
+    (4) NOT ("FORCE INDEX(...)" is used for table and there is 'ref' access
+             path, but there is no quick select)
+        If the condition in the above brackets holds, then the only possible
+        "table scan" access method is ALL/index (there is no quick select).
+        Since we have a 'ref' access path, and FORCE INDEX instructs us to
+        choose it over ALL/index, there is no need to consider a full table
+        scan.
   */
-  if ((records >= s->found_records || best > s->read_time) &&
-      !(s->quick && best_key && s->quick->index == best_key->key &&
-        best_max_key_part >= s->table->quick_key_parts[best_key->key]) &&
-      !((s->table->file->table_flags() & HA_TABLE_SCAN_ON_INDEX) &&
-        ! s->table->used_keys.is_clear_all() && best_key) &&
-      !(s->table->force_index && best_key))
+  if ((records >= s->found_records || best > s->read_time) &&            // (1)
+      !(s->quick && best_key && s->quick->index == best_key->key &&      // (2)
+        best_max_key_part >= s->table->quick_key_parts[best_key->key]) &&// (2)
+      !((s->table->file->table_flags() & HA_TABLE_SCAN_ON_INDEX) &&      // (3)
+        ! s->table->used_keys.is_clear_all() && best_key) &&             // (3)
+      !(s->table->force_index && best_key && !s->quick))                 // (4)
   {                                             // Check full join
     ha_rows rnd_records= s->found_records;
     /*
@@ -4501,13 +4521,15 @@
 	parts of the row from any of the used index.
 	This is because table scans uses index and we would not win
 	anything by using a table scan.
+        (see comment in best_access_path() for more details on the below
+         condition)
       */
       if ((records >= s->found_records || best > s->read_time) &&
 	  !(s->quick && best_key && s->quick->index == best_key->key &&
 	    best_max_key_part >= s->table->quick_key_parts[best_key->key]) &&
 	  !((s->table->file->table_flags() & HA_TABLE_SCAN_ON_INDEX) &&
 	    ! s->table->used_keys.is_clear_all() && best_key) &&
-	  !(s->table->force_index && best_key))
+	  !(s->table->force_index && best_key && !s->quick))
       {						// Check full join
         ha_rows rnd_records= s->found_records;
         /*
@@ -8655,6 +8677,11 @@
         have null
       */
       hidden_null_count=null_count;
+      /*
+	We need to update hidden_field_count as we may have stored group
+	functions with constant arguments
+      */
+      param->hidden_field_count= (uint) (reg_field - table->field);
       null_count= 0;
     }
   }
@@ -8876,7 +8903,7 @@
     }
   }
 
-  if (distinct)
+  if (distinct && field_count != param->hidden_field_count)
   {
     /*
       Create an unique key or an unique constraint over all columns
@@ -9942,6 +9969,7 @@
   int error;
   READ_RECORD *info;
 
+  join_tab->table->null_row= 0;
   if (!join_tab->cache.records)
     return NESTED_LOOP_OK;                      /* Nothing to do */
   if (skip_last)
@@ -10777,7 +10805,7 @@
     item->save_org_in_field(group->field);
     /* Store in the used key if the field was 0 */
     if (item->maybe_null)
-      group->buff[-1]=item->null_value ? 1 : 0;
+      group->buff[-1]= (char) group->field->is_null();
   }
   if (!table->file->index_read(table->record[1],
 			       join->tmp_table_param.group_buff,0,

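The long comment added to best_access_path() enumerates four conditions under
which a full scan must still be costed even though a 'ref' path exists, and
the rewritten if-statement tags each clause with the matching number (a later
hunk applies the same fix to the other copy of this test). A compact
restatement of that decision as a standalone predicate; the field names are
simplified stand-ins for the join-structure members used in the hunk:

  #include <cstdio>

  /* Inputs gathered from the candidate 'ref' access and the table, named
     after the variables used in the hunk (simplified stand-ins). */
  struct scan_choice_input
  {
    bool   have_quick;          /* s->quick != NULL */
    bool   have_best_key;       /* best_key != NULL */
    bool   quick_uses_best_key; /* s->quick->index == best_key->key */
    bool   quick_covers_ref;    /* best_max_key_part >= quick_key_parts */
    bool   scan_on_index;       /* table_flags() & HA_TABLE_SCAN_ON_INDEX */
    bool   covering_keys_exist; /* !used_keys.is_clear_all() */
    bool   force_index;         /* FORCE INDEX given for the table */
    double ref_records;         /* records estimated for 'ref' */
    double found_records;       /* records from scan/quick */
    double ref_cost;            /* 'best': cost of the 'ref' path */
    double scan_cost;           /* s->read_time */
  };

  /* TRUE when the full scan alternative should still be considered,
     mirroring conditions (1)-(4) from the comment in the hunk. */
  static bool consider_full_scan(const scan_choice_input &in)
  {
    bool ref_not_clearly_better=                                   /* (1) */
      in.ref_records >= in.found_records || in.ref_cost > in.scan_cost;
    bool quick_is_same_index=                                      /* (2) */
      in.have_quick && in.have_best_key &&
      in.quick_uses_best_key && in.quick_covers_ref;
    bool index_scan_preferred=                                     /* (3) */
      in.scan_on_index && in.covering_keys_exist && in.have_best_key;
    bool force_index_blocks_scan=                                  /* (4) */
      in.force_index && in.have_best_key && !in.have_quick;

    return ref_not_clearly_better && !quick_is_same_index &&
           !index_scan_preferred && !force_index_blocks_scan;
  }

  int main()
  {
    scan_choice_input in= {};        /* no quick select, no best key */
    in.ref_records= 100; in.found_records= 50;
    in.ref_cost= 4.0;    in.scan_cost= 2.0;
    std::printf("consider full scan: %d\n", (int) consider_full_scan(in));
    return 0;
  }
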
--- 1.9/mysql-test/r/partition.result	2005-12-27 15:04:27 +03:00
+++ 1.10/mysql-test/r/partition.result	2006-01-18 13:56:17 +03:00
@@ -65,6 +65,8 @@
 (partition x1 tablespace ts1,
 partition x2 tablespace ts2,
 partition x3 tablespace ts3);
+CREATE TABLE t2 LIKE t1;
+drop table t2;
 drop table t1;
 CREATE TABLE t1 (
 a int not null,
@@ -108,6 +110,127 @@
 insert into t1 values (4);
 UNLOCK TABLES;
 drop table t1;
+CREATE TABLE t1 (a int, name VARCHAR(50), purchased DATE)
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (3),
+PARTITION p1 VALUES LESS THAN (7),
+PARTITION p2 VALUES LESS THAN (9),
+PARTITION p3 VALUES LESS THAN (11));
+INSERT INTO t1 VALUES
+(1, 'desk organiser', '2003-10-15'),
+(2, 'CD player', '1993-11-05'),
+(3, 'TV set', '1996-03-10'),
+(4, 'bookcase', '1982-01-10'),
+(5, 'exercise bike', '2004-05-09'),
+(6, 'sofa', '1987-06-05'),
+(7, 'popcorn maker', '2001-11-22'),
+(8, 'acquarium', '1992-08-04'),
+(9, 'study desk', '1984-09-16'),
+(10, 'lava lamp', '1998-12-25');
+SELECT * from t1 ORDER BY a;
+a	name	purchased
+1	desk organiser	2003-10-15
+2	CD player	1993-11-05
+3	TV set	1996-03-10
+4	bookcase	1982-01-10
+5	exercise bike	2004-05-09
+6	sofa	1987-06-05
+7	popcorn maker	2001-11-22
+8	acquarium	1992-08-04
+9	study desk	1984-09-16
+10	lava lamp	1998-12-25
+ALTER TABLE t1 DROP PARTITION p0;
+SELECT * from t1 ORDER BY a;
+a	name	purchased
+3	TV set	1996-03-10
+4	bookcase	1982-01-10
+5	exercise bike	2004-05-09
+6	sofa	1987-06-05
+7	popcorn maker	2001-11-22
+8	acquarium	1992-08-04
+9	study desk	1984-09-16
+10	lava lamp	1998-12-25
+drop table t1;
+CREATE TABLE t1 (a int)
+PARTITION BY LIST (a)
+(PARTITION p0 VALUES IN (1,2,3), PARTITION p1 VALUES IN (4,5,6));
+insert into t1 values (1),(2),(3),(4),(5),(6);
+select * from t1;
+a
+1
+2
+3
+4
+5
+6
+truncate t1;
+select * from t1;
+a
+truncate t1;
+select * from t1;
+a
+drop table t1;
+CREATE TABLE t1 (a int, b int, primary key(a,b))
+PARTITION BY KEY(b,a) PARTITIONS 4;
+insert into t1 values (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6);
+select * from t1 where a = 4;
+a	b
+4	4
+drop table t1;
+CREATE TABLE t1 (a int)
+PARTITION BY LIST (a)
+PARTITIONS 1
+(PARTITION x1 VALUES IN (1) ENGINE=MEMORY);
+show create table t1;
+Table	Create Table
+t1	CREATE TABLE `t1` (
+  `a` int(11) default NULL
+) ENGINE=MEMORY DEFAULT CHARSET=latin1 PARTITION BY LIST (a) (PARTITION x1 VALUES IN (1) ENGINE = MEMORY)
+drop table t1;
+CREATE TABLE t1 (a int, unique(a))
+PARTITION BY LIST (a)
+(PARTITION x1 VALUES IN (10), PARTITION x2 VALUES IN (20));
+REPLACE t1 SET a = 4;
+ERROR HY000: Table has no partition for value 4
+drop table t1;
+CREATE TABLE t1 (a int)
+PARTITION BY LIST (a)
+(PARTITION x1 VALUES IN (2), PARTITION x2 VALUES IN (3));
+insert into t1 values (2), (3);
+insert into t1 values (4);
+ERROR HY000: Table has no partition for value 4
+insert into t1 values (1);
+ERROR HY000: Table has no partition for value 1
+drop table t1;
+CREATE TABLE t1 (a int)
+PARTITION BY HASH(a)
+PARTITIONS 5;
+SHOW CREATE TABLE t1;
+Table	Create Table
+t1	CREATE TABLE `t1` (
+  `a` int(11) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (a) PARTITIONS 5 
+drop table t1;
+CREATE TABLE t1 (a int)
+PARTITION BY RANGE (a)
+(PARTITION x1 VALUES LESS THAN (2));
+insert into t1 values (1);
+update t1 set a = 5;
+ERROR HY000: Table has no partition for value 5
+drop table t1;
+CREATE TABLE t1 (a int)
+PARTITION BY LIST (a)
+(PARTITION x1 VALUES IN (10), PARTITION x2 VALUES IN (20));
+analyze table t1;
+Table	Op	Msg_type	Msg_text
+test.t1	analyze	status	OK
+drop table t1;
+CREATE TABLE `t1` (
+`id` int(11) default NULL
+) ENGINE=BLACKHOLE DEFAULT CHARSET=latin1 PARTITION BY HASH (id) ;
+SELECT * FROM t1;
+id
+drop table t1;
 CREATE TABLE `t1` (
 `id` int(11) default NULL
 ) ENGINE=BLACKHOLE DEFAULT CHARSET=latin1 PARTITION BY HASH (id) ;
@@ -119,8 +242,8 @@
 partition by range (a)
 ( partition p0 values less than(10),
 partition p1 values less than (20),
-partition p2 values less than maxvalue);
-alter table t1 reorganise partition p2 into (partition p2 values less than (30));
+partition p2 values less than (25));
+alter table t1 reorganize partition p2 into (partition p2 values less than (30));
 show create table t1;
 Table	Create Table
 t1	CREATE TABLE `t1` (
@@ -139,7 +262,7 @@
 PARTITION x7 VALUES LESS THAN (16),
 PARTITION x8 VALUES LESS THAN (18),
 PARTITION x9 VALUES LESS THAN (20));
-ALTER TABLE t1 REORGANISE PARTITION x0,x1,x2 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1,x2 INTO
 (PARTITION x1 VALUES LESS THAN (6));
 show create table t1;
 Table	Create Table

--- 1.10/mysql-test/t/partition.test	2006-01-04 11:08:49 +03:00
+++ 1.11/mysql-test/t/partition.test	2006-01-18 14:09:00 +03:00
@@ -8,6 +8,7 @@
 --disable_warnings
 drop table if exists t1;
 --enable_warnings
+
 #
 # Partition by key no partition defined => OK
 #
@@ -97,6 +98,9 @@
  partition x2 tablespace ts2,
  partition x3 tablespace ts3);
 
+CREATE TABLE t2 LIKE t1;
+
+drop table t2;
 drop table t1;
 
 #
@@ -163,6 +167,141 @@
 drop table t1;
 
 #
+# Bug #13644 DROP PARTITION NULL's DATE column
+#
+CREATE TABLE t1 (a int, name VARCHAR(50), purchased DATE)
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (3),
+ PARTITION p1 VALUES LESS THAN (7),
+ PARTITION p2 VALUES LESS THAN (9),
+ PARTITION p3 VALUES LESS THAN (11));
+INSERT INTO t1 VALUES
+(1, 'desk organiser', '2003-10-15'),
+(2, 'CD player', '1993-11-05'),
+(3, 'TV set', '1996-03-10'),
+(4, 'bookcase', '1982-01-10'),
+(5, 'exercise bike', '2004-05-09'),
+(6, 'sofa', '1987-06-05'),
+(7, 'popcorn maker', '2001-11-22'),
+(8, 'acquarium', '1992-08-04'),
+(9, 'study desk', '1984-09-16'),
+(10, 'lava lamp', '1998-12-25');
+
+SELECT * from t1 ORDER BY a;
+ALTER TABLE t1 DROP PARTITION p0;
+SELECT * from t1 ORDER BY a;
+
+drop table t1;
+
+#
+# Bug #13442; Truncate Partitioned table doesn't work
+#
+
+CREATE TABLE t1 (a int)
+PARTITION BY LIST (a)
+(PARTITION p0 VALUES IN (1,2,3), PARTITION p1 VALUES IN (4,5,6));
+
+insert into t1 values (1),(2),(3),(4),(5),(6);
+select * from t1;
+truncate t1;
+select * from t1;
+truncate t1;
+select * from t1;
+drop table t1;
+
+#
+# Bug #13445 Partition by KEY method crashes server
+#
+CREATE TABLE t1 (a int, b int, primary key(a,b))
+PARTITION BY KEY(b,a) PARTITIONS 4;
+
+insert into t1 values (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6);
+select * from t1 where a = 4;
+
+drop table t1;
+
+#
+# Bug #13438: Engine clause in PARTITION clause causes crash
+#
+CREATE TABLE t1 (a int)
+PARTITION BY LIST (a)
+PARTITIONS 1
+(PARTITION x1 VALUES IN (1) ENGINE=MEMORY);
+
+show create table t1;
+drop table t1;
+
+#
+# Bug #13440: REPLACE causes crash in partitioned table
+#
+CREATE TABLE t1 (a int, unique(a))
+PARTITION BY LIST (a)
+(PARTITION x1 VALUES IN (10), PARTITION x2 VALUES IN (20));
+
+--error ER_NO_PARTITION_FOR_GIVEN_VALUE 
+REPLACE t1 SET a = 4;
+drop table t1;
+
+#
+# Bug #14365: Crash if value too small in list partitioned table
+#
+CREATE TABLE t1 (a int)
+PARTITION BY LIST (a)
+(PARTITION x1 VALUES IN (2), PARTITION x2 VALUES IN (3));
+
+insert into t1 values (2), (3);
+--error ER_NO_PARTITION_FOR_GIVEN_VALUE
+insert into t1 values (4);
+--error ER_NO_PARTITION_FOR_GIVEN_VALUE
+insert into t1 values (1);
+drop table t1;
+
+#
+# Bug 14327: PARTITIONS clause gets lost in SHOW CREATE TABLE
+#
+CREATE TABLE t1 (a int)
+PARTITION BY HASH(a)
+PARTITIONS 5;
+
+SHOW CREATE TABLE t1;
+
+drop table t1;
+
+#
+# Bug #13446: Update to value outside of list values doesn't give error
+#
+CREATE TABLE t1 (a int)
+PARTITION BY RANGE (a)
+(PARTITION x1 VALUES LESS THAN (2));
+
+insert into t1 values (1);
+--error ER_NO_PARTITION_FOR_GIVEN_VALUE
+update t1 set a = 5;
+
+drop table t1;
+
+#
+# Bug #13441: Analyze on partitioned table didn't work
+#
+CREATE TABLE t1 (a int)
+PARTITION BY LIST (a)
+(PARTITION x1 VALUES IN (10), PARTITION x2 VALUES IN (20));
+
+analyze table t1;
+
+drop table t1;
+
+#
+# BUG 14524
+#
+CREATE TABLE `t1` (
+  `id` int(11) default NULL
+) ENGINE=BLACKHOLE DEFAULT CHARSET=latin1 PARTITION BY HASH (id) ;
+SELECT * FROM t1;
+
+drop table t1;
+
+#
 # BUG 14524
 #
 CREATE TABLE `t1` (
@@ -180,9 +319,9 @@
 partition by range (a)
   ( partition p0 values less than(10),
     partition p1 values less than (20),
-    partition p2 values less than maxvalue);
+    partition p2 values less than (25));
 
-alter table t1 reorganise partition p2 into (partition p2 values less than (30));
+alter table t1 reorganize partition p2 into (partition p2 values less than (30));
 show create table t1;
 drop table t1;
 
@@ -199,7 +338,7 @@
  PARTITION x8 VALUES LESS THAN (18),
  PARTITION x9 VALUES LESS THAN (20));
 
-ALTER TABLE t1 REORGANISE PARTITION x0,x1,x2 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1,x2 INTO
 (PARTITION x1 VALUES LESS THAN (6));
 show create table t1;
 drop table t1;

--- 1.22/sql/sql_partition.cc	2006-01-05 17:46:58 +03:00
+++ 1.23/sql/sql_partition.cc	2006-01-18 14:09:00 +03:00
@@ -43,13 +43,18 @@
 /*
   Partition related functions declarations and some static constants;
 */
-static const char *hash_str= "HASH";
-static const char *range_str= "RANGE";
-static const char *list_str= "LIST";
+const LEX_STRING partition_keywords[]=
+{
+  { (char *) STRING_WITH_LEN("HASH") },
+  { (char *) STRING_WITH_LEN("RANGE") },
+  { (char *) STRING_WITH_LEN("LIST") }, 
+  { (char *) STRING_WITH_LEN("KEY") },
+  { (char *) STRING_WITH_LEN("MAXVALUE") },
+  { (char *) STRING_WITH_LEN("LINEAR ") }
+};
 static const char *part_str= "PARTITION";
 static const char *sub_str= "SUB";
 static const char *by_str= "BY";
-static const char *key_str= "KEY";
 static const char *space_str= " ";
 static const char *equal_str= "=";
 static const char *end_paren_str= ")";
@@ -57,34 +62,48 @@
 static const char *comma_str= ",";
 static char buff[22];
 
-bool get_partition_id_list(partition_info *part_info,
-                           uint32 *part_id);
-bool get_partition_id_range(partition_info *part_info,
-                            uint32 *part_id);
-bool get_partition_id_hash_nosub(partition_info *part_info,
-                                 uint32 *part_id);
-bool get_partition_id_key_nosub(partition_info *part_info,
-                                uint32 *part_id);
-bool get_partition_id_linear_hash_nosub(partition_info *part_info,
-                                        uint32 *part_id);
-bool get_partition_id_linear_key_nosub(partition_info *part_info,
-                                       uint32 *part_id);
-bool get_partition_id_range_sub_hash(partition_info *part_info,
-                                     uint32 *part_id);
-bool get_partition_id_range_sub_key(partition_info *part_info,
-                                    uint32 *part_id);
-bool get_partition_id_range_sub_linear_hash(partition_info *part_info,
-                                            uint32 *part_id);
-bool get_partition_id_range_sub_linear_key(partition_info *part_info,
-                                           uint32 *part_id);
-bool get_partition_id_list_sub_hash(partition_info *part_info,
-                                    uint32 *part_id);
-bool get_partition_id_list_sub_key(partition_info *part_info,
-                                   uint32 *part_id);
-bool get_partition_id_list_sub_linear_hash(partition_info *part_info,
-                                           uint32 *part_id);
-bool get_partition_id_list_sub_linear_key(partition_info *part_info,
-                                          uint32 *part_id);
+int get_partition_id_list(partition_info *part_info,
+                           uint32 *part_id,
+                           longlong *func_value);
+int get_partition_id_range(partition_info *part_info,
+                            uint32 *part_id,
+                            longlong *func_value);
+int get_partition_id_hash_nosub(partition_info *part_info,
+                                 uint32 *part_id,
+                                 longlong *func_value);
+int get_partition_id_key_nosub(partition_info *part_info,
+                                uint32 *part_id,
+                                longlong *func_value);
+int get_partition_id_linear_hash_nosub(partition_info *part_info,
+                                        uint32 *part_id,
+                                        longlong *func_value);
+int get_partition_id_linear_key_nosub(partition_info *part_info,
+                                       uint32 *part_id,
+                                       longlong *func_value);
+int get_partition_id_range_sub_hash(partition_info *part_info,
+                                     uint32 *part_id,
+                                     longlong *func_value);
+int get_partition_id_range_sub_key(partition_info *part_info,
+                                    uint32 *part_id,
+                                    longlong *func_value);
+int get_partition_id_range_sub_linear_hash(partition_info *part_info,
+                                            uint32 *part_id,
+                                            longlong *func_value);
+int get_partition_id_range_sub_linear_key(partition_info *part_info,
+                                           uint32 *part_id,
+                                           longlong *func_value);
+int get_partition_id_list_sub_hash(partition_info *part_info,
+                                    uint32 *part_id,
+                                    longlong *func_value);
+int get_partition_id_list_sub_key(partition_info *part_info,
+                                   uint32 *part_id,
+                                   longlong *func_value);
+int get_partition_id_list_sub_linear_hash(partition_info *part_info,
+                                           uint32 *part_id,
+                                           longlong *func_value);
+int get_partition_id_list_sub_linear_key(partition_info *part_info,
+                                          uint32 *part_id,
+                                          longlong *func_value);
 uint32 get_partition_id_hash_sub(partition_info *part_info); 
 uint32 get_partition_id_key_sub(partition_info *part_info); 
 uint32 get_partition_id_linear_hash_sub(partition_info *part_info); 
@@ -110,12 +129,15 @@
 /*
   A routine used by the parser to decide whether we are specifying a full
   partitioning or if only partitions to add or to split.
+
   SYNOPSIS
     is_partition_management()
     lex                    Reference to the lex object
+
   RETURN VALUE
     TRUE                   Yes, it is part of a management partition command
     FALSE                  No, not a management partition command
+
   DESCRIPTION
     This needs to be outside of WITH_PARTITION_STORAGE_ENGINE since it is
     used from the sql parser that doesn't have any #ifdef's
@@ -125,31 +147,34 @@
 {
   return (lex->sql_command == SQLCOM_ALTER_TABLE &&
           (lex->alter_info.flags == ALTER_ADD_PARTITION ||
-           lex->alter_info.flags == ALTER_REORGANISE_PARTITION));
+           lex->alter_info.flags == ALTER_REORGANIZE_PARTITION));
 }
 
 #ifdef WITH_PARTITION_STORAGE_ENGINE
 /*
-  A support function to check if a partition name is in a list of strings
+  A support function to check if a name is in a list of strings
+
   SYNOPSIS
-    is_partition_in_list()
-    part_name          String searched for
-    list_part_names    A list of names searched in
+    is_name_in_list()
+    name               String searched for
+    list_names         A list of names searched in
+
   RETURN VALUES
     TRUE               String found
     FALSE              String not found
 */
 
-bool is_partition_in_list(char *part_name,
-                          List<char> list_part_names)
+bool is_name_in_list(char *name,
+                          List<char> list_names)
 {
-  List_iterator<char> part_names_it(list_part_names);
-  uint no_names= list_part_names.elements;
+  List_iterator<char> names_it(list_names);
+  uint no_names= list_names.elements;
   uint i= 0;
+
   do
   {
-    char *list_name= part_names_it++;
-    if (!(my_strcasecmp(system_charset_info, part_name, list_name)))
+    char *list_name= names_it++;
+    if (!(my_strcasecmp(system_charset_info, name, list_name)))
       return TRUE;
   } while (++i < no_names);
   return FALSE;
@@ -159,47 +184,99 @@
 /*
   A support function to check partition names for duplication in a
   partitioned table
+
   SYNOPSIS
-    is_partitions_in_table()
+    are_partitions_in_table()
     new_part_info      New partition info
     old_part_info      Old partition info
+
   RETURN VALUES
     TRUE               Duplicate names found
     FALSE              Duplicate names not found
+
   DESCRIPTION
     Can handle that the new and old parts are the same in which case it
     checks that the list of names in the partitions doesn't contain any
     duplicated names.
 */
 
-bool is_partitions_in_table(partition_info *new_part_info,
-                            partition_info *old_part_info)
+char *are_partitions_in_table(partition_info *new_part_info,
+                              partition_info *old_part_info)
 {
-  uint no_new_parts= new_part_info->partitions.elements, new_count;
-  uint no_old_parts= old_part_info->partitions.elements, old_count;
+  uint no_new_parts= new_part_info->partitions.elements;
+  uint no_old_parts= old_part_info->partitions.elements;
+  uint new_count, old_count;
   List_iterator<partition_element> new_parts_it(new_part_info->partitions);
-  bool same_part_info= (new_part_info == old_part_info);
-  DBUG_ENTER("is_partitions_in_table");
+  bool is_same_part_info= (new_part_info == old_part_info);
+  DBUG_ENTER("are_partitions_in_table");
+  DBUG_PRINT("enter", ("%u", no_new_parts));
 
   new_count= 0;
   do
   {
     List_iterator<partition_element> old_parts_it(old_part_info->partitions);
     char *new_name= (new_parts_it++)->partition_name;
+    DBUG_PRINT("info", ("%s", new_name));
     new_count++;
     old_count= 0;
     do
     {
       char *old_name= (old_parts_it++)->partition_name;
       old_count++;
-      if (same_part_info && old_count == new_count)
+      if (is_same_part_info && old_count == new_count)
         break;
       if (!(my_strcasecmp(system_charset_info, old_name, new_name)))
       {
-        DBUG_RETURN(TRUE);
+        DBUG_PRINT("info", ("old_name = %s, not ok", old_name));
+        DBUG_RETURN(old_name);
       }
     } while (old_count < no_old_parts);
   } while (new_count < no_new_parts);
+  DBUG_RETURN(NULL);
+}
+
+/*
+  Set-up defaults for partitions. 
+
+  SYNOPSIS
+    partition_default_handling()
+    table                         Table object
+    table_name                    Table name to use when getting no_parts
+    db_name                       Database name to use when getting no_parts
+    part_info                     Partition info to set up
+
+  RETURN VALUES
+    TRUE                          Error
+    FALSE                         Success
+*/
+
+bool partition_default_handling(TABLE *table, partition_info *part_info)
+{
+  DBUG_ENTER("partition_default_handling");
+
+  if (part_info->use_default_no_partitions)
+  {
+    if (table->file->get_no_parts(table->s->normalized_path.str,
+                                  &part_info->no_parts))
+    {
+      DBUG_RETURN(TRUE);
+    }
+  }
+  else if (is_sub_partitioned(part_info) &&
+           part_info->use_default_no_subpartitions)
+  {
+    uint no_parts;
+    if (table->file->get_no_parts(table->s->normalized_path.str,
+                                  &no_parts))
+    {
+      DBUG_RETURN(TRUE);
+    }
+    DBUG_ASSERT(part_info->no_parts > 0);
+    part_info->no_subparts= no_parts / part_info->no_parts;
+    DBUG_ASSERT((no_parts % part_info->no_parts) == 0);
+  }
+  set_up_defaults_for_partitioning(part_info, table->file,
+                                   (ulonglong)0, (uint)0);
   DBUG_RETURN(FALSE);
 }
 
@@ -250,7 +327,7 @@
         break;
       if (!(my_strcasecmp(system_charset_info, old_name, new_name)))
       {
-        if (!is_partition_in_list(old_name, list_part_names))
+        if (!is_name_in_list(old_name, list_part_names))
           DBUG_RETURN(TRUE);
       }
     } while (old_count < no_old_parts);
@@ -262,36 +339,35 @@
 /*
   A useful routine used by update_row for partition handlers to calculate
   the partition ids of the old and the new record.
+
   SYNOPSIS
     get_part_for_update()
     old_data                Buffer of old record
     new_data                Buffer of new record
     rec0                    Reference to table->record[0]
     part_info               Reference to partition information
-    part_field_array        A NULL-terminated array of fields for partition
-                            function
-    old_part_id             The returned partition id of old record 
-    new_part_id             The returned partition id of new record 
+    out:old_part_id         The returned partition id of old record 
+    out:new_part_id         The returned partition id of new record
+
   RETURN VALUE
     0                       Success
     > 0                     Error code
-  DESCRIPTION
-    Dependent on whether buf is not record[0] we need to prepare the
-    fields. Then we call the function pointer get_partition_id to
-    calculate the partition ids.
 */
 
 int get_parts_for_update(const byte *old_data, byte *new_data,
                          const byte *rec0, partition_info *part_info,
-                         uint32 *old_part_id, uint32 *new_part_id)
+                         uint32 *old_part_id, uint32 *new_part_id,
+                         longlong *new_func_value)
 {
   Field **part_field_array= part_info->full_part_field_array;
   int error;
+  longlong old_func_value;
   DBUG_ENTER("get_parts_for_update");
-  DBUG_ASSERT(new_data == rec0);
 
+  DBUG_ASSERT(new_data == rec0);
   set_field_ptr(part_field_array, old_data, rec0);
-  error= part_info->get_partition_id(part_info, old_part_id);
+  error= part_info->get_partition_id(part_info, old_part_id,
+                                     &old_func_value);
   set_field_ptr(part_field_array, rec0, old_data);
   if (unlikely(error))                             // Should never happen
   {
@@ -302,7 +378,9 @@
   if (new_data == rec0)
 #endif
   {
-    if (unlikely(error= part_info->get_partition_id(part_info,new_part_id)))
+    if (unlikely(error= part_info->get_partition_id(part_info,
+                                                    new_part_id,
+                                                    new_func_value)))
     {
       DBUG_RETURN(error);
     }
@@ -316,7 +394,8 @@
       condition is false in one test situation before pushing the code.
     */
     set_field_ptr(part_field_array, new_data, rec0);
-    error= part_info->get_partition_id(part_info, new_part_id);
+    error= part_info->get_partition_id(part_info, new_part_id,
+                                       new_func_value);
     set_field_ptr(part_field_array, rec0, new_data);
     if (unlikely(error))
     {
@@ -331,17 +410,18 @@
 /*
   A useful routine used by delete_row for partition handlers to calculate
   the partition id.
+
   SYNOPSIS
     get_part_for_delete()
     buf                     Buffer of old record
     rec0                    Reference to table->record[0]
     part_info               Reference to partition information
-    part_field_array        A NULL-terminated array of fields for partition
-                            function
-    part_id                 The returned partition id to delete from
+    out:part_id             The returned partition id to delete from
+
   RETURN VALUE
     0                       Success
     > 0                     Error code
+
   DESCRIPTION
     Dependent on whether buf is not record[0] we need to prepare the
     fields. Then we call the function pointer get_partition_id to
@@ -352,11 +432,13 @@
                         partition_info *part_info, uint32 *part_id)
 {
   int error;
+  longlong func_value;
   DBUG_ENTER("get_part_for_delete");
 
   if (likely(buf == rec0))
   {
-    if (unlikely((error= part_info->get_partition_id(part_info, part_id))))
+    if (unlikely((error= part_info->get_partition_id(part_info, part_id,
+                                                     &func_value))))
     {
       DBUG_RETURN(error);
     }
@@ -366,7 +448,7 @@
   {
     Field **part_field_array= part_info->full_part_field_array;
     set_field_ptr(part_field_array, buf, rec0);
-    error= part_info->get_partition_id(part_info, part_id);
+    error= part_info->get_partition_id(part_info, part_id, &func_value);
     set_field_ptr(part_field_array, rec0, buf);
     if (unlikely(error))
     {
@@ -383,12 +465,15 @@
   check what partition a certain value belongs to. At the same time it does
   also check that the range constants are defined in increasing order and
   that the expressions are constant integer expressions.
+
   SYNOPSIS
     check_range_constants()
-      part_info
+    part_info             Partition info
+
   RETURN VALUE
     TRUE                An error occurred during creation of range constants
     FALSE               Successful creation of range constant mapping
+
   DESCRIPTION
     This routine is called from check_partition_info to get a quick error
     before we came too far into the CREATE TABLE process. It is also called
@@ -399,8 +484,10 @@
 static bool check_range_constants(partition_info *part_info)
 {
   partition_element* part_def;
-  longlong current_largest_int= LONGLONG_MIN, part_range_value_int;
-  uint no_parts= part_info->no_parts, i;
+  longlong current_largest_int= LONGLONG_MIN;
+  longlong part_range_value_int;
+  uint no_parts= part_info->no_parts;
+  uint i;
   List_iterator<partition_element> it(part_info->partitions);
   bool result= TRUE;
   DBUG_ENTER("check_range_constants");
@@ -411,7 +498,7 @@
                       (longlong*)sql_alloc(no_parts * sizeof(longlong));
   if (unlikely(part_info->range_int_array == NULL))
   {
-    my_error(ER_OUTOFMEMORY, MYF(0), no_parts*sizeof(longlong));
+    mem_alloc_error(no_parts * sizeof(longlong));
     goto end;
   }
   i= 0;
@@ -442,10 +529,12 @@
 /*
   A support routine for check_list_constants used by qsort to sort the
   constant list expressions.
+
   SYNOPSIS
     list_part_cmp()
       a                First list constant to compare with
       b                Second list constant to compare with
+
   RETURN VALUE
     +1                 a > b
    0                  a == b
@@ -454,9 +543,8 @@
 
 static int list_part_cmp(const void* a, const void* b)
 {
-  longlong a1, b1;
-  a1= ((LIST_PART_ENTRY*)a)->list_value;
-  b1= ((LIST_PART_ENTRY*)b)->list_value;
+  longlong a1= ((LIST_PART_ENTRY*)a)->list_value;
+  longlong b1= ((LIST_PART_ENTRY*)b)->list_value;
   if (a1 < b1)
     return -1;
   else if (a1 > b1)
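
  As a usage note (not from the patch itself): a comparator like this is
  typically handed to qsort over the list_array and must return only the sign
  of the comparison. The sketch below uses a simplified stand-in struct rather
  than the real LIST_PART_ENTRY layout:

    #include <stdlib.h>

    struct entry { long long list_value; unsigned int part_id; };

    static int entry_cmp(const void *a, const void *b)
    {
      long long a1= ((const struct entry*)a)->list_value;
      long long b1= ((const struct entry*)b)->list_value;
      /* Return only the sign; subtracting the values could overflow. */
      return (a1 < b1) ? -1 : (a1 > b1) ? 1 : 0;
    }

    /* qsort(list_array, no_list_values, sizeof(struct entry), entry_cmp);
       afterwards the sorted array can be binary searched by value. */
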
@@ -471,12 +559,15 @@
  check what partition a certain value belongs to. At the same time it also
  checks that there are no duplicates among the list constants and that
  the list expressions are constant integer expressions.
+
   SYNOPSIS
     check_list_constants()
-      part_info
+    part_info             Partition info
+
   RETURN VALUE
     TRUE                  An error occurred during creation of list constants
     FALSE                 Successful creation of list constant mapping
+
   DESCRIPTION
     This routine is called from check_partition_info to get a quick error
    before we come too far into the CREATE TABLE process. It is also called
@@ -486,9 +577,12 @@
 
 static bool check_list_constants(partition_info *part_info)
 {
-  uint i, no_list_values= 0, no_parts, list_index= 0;
+  uint i, no_parts;
+  uint no_list_values= 0;
+  uint list_index= 0;
   longlong *list_value;
-  bool not_first, result= TRUE;
+  bool not_first;
+  bool result= TRUE;
   longlong curr_value, prev_value;
   partition_element* part_def;
   List_iterator<partition_element> list_func_it(part_info->partitions);
@@ -526,7 +620,7 @@
       (LIST_PART_ENTRY*)sql_alloc(no_list_values*sizeof(LIST_PART_ENTRY));
   if (unlikely(part_info->list_array == NULL))
   {
-    my_error(ER_OUTOFMEMORY, MYF(0), no_list_values*sizeof(LIST_PART_ENTRY));
+    mem_alloc_error(no_list_values * sizeof(LIST_PART_ENTRY));
     goto end;
   }
 
@@ -570,12 +664,16 @@
 /*
   Create a memory area where default partition names are stored and fill it
   up with the names.
+
   SYNOPSIS
     create_default_partition_names()
     no_parts                        Number of partitions
+    start_no                        Starting partition number
    subpart                         Whether the names are for subpartitions
+
   RETURN VALUE
     A pointer to the memory area of the default partition names
+
   DESCRIPTION
     A support routine for the partition code where default values are
     generated.
@@ -585,17 +683,18 @@
 #define MAX_PART_NAME_SIZE 8
 
 static char *create_default_partition_names(uint no_parts, uint start_no,
-                                            bool subpart)
+                                            bool is_subpart)
 {
   char *ptr= sql_calloc(no_parts*MAX_PART_NAME_SIZE);
   char *move_ptr= ptr;
   uint i= 0;
   DBUG_ENTER("create_default_partition_names");
+
   if (likely(ptr != 0))
   {
     do
     {
-      if (subpart)
+      if (is_subpart)
         my_sprintf(move_ptr, (move_ptr,"sp%u", (start_no + i)));
       else
         my_sprintf(move_ptr, (move_ptr,"p%u", (start_no + i)));
@@ -604,7 +703,7 @@
   }
   else
   {
-    my_error(ER_OUTOFMEMORY, MYF(0), no_parts*MAX_PART_NAME_SIZE);
+    mem_alloc_error(no_parts*MAX_PART_NAME_SIZE);
   }
   DBUG_RETURN(ptr);
 }
@@ -614,14 +713,18 @@
   Set up all the default partitions not set-up by the user in the SQL
   statement. Also perform a number of checks that the user hasn't tried
  to use default values where no defaults exist.
+
   SYNOPSIS
     set_up_default_partitions()
     part_info           The reference to all partition information
     file                A reference to a handler of the table
     max_rows            Maximum number of rows stored in the table
+    start_no            Starting partition number
+
   RETURN VALUE
     TRUE                Error, attempted default values not possible
     FALSE               Ok, default partitions set-up
+
   DESCRIPTION
     The routine uses the underlying handler of the partitioning to define
     the default number of partitions. For some handlers this requires
@@ -644,16 +747,15 @@
   {
     const char *error_string;
     if (part_info->part_type == RANGE_PARTITION)
-      error_string= range_str;
+      error_string= partition_keywords[PKW_RANGE].str;
     else
-      error_string= list_str;
+      error_string= partition_keywords[PKW_LIST].str;
     my_error(ER_PARTITIONS_MUST_BE_DEFINED_ERROR, MYF(0), error_string);
     goto end;
   }
   if (part_info->no_parts == 0)
     part_info->no_parts= file->get_default_no_partitions(max_rows);
   no_parts= part_info->no_parts;
-  part_info->use_default_partitions= FALSE;
   if (unlikely(no_parts > MAX_PARTITIONS))
   {
     my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
@@ -667,16 +769,16 @@
   do
   {
     partition_element *part_elem= new partition_element();
-    if (likely(part_elem != 0))
+    if (likely(part_elem != 0 &&
+               (!part_info->partitions.push_back(part_elem))))
     {
-      part_elem->engine_type= NULL;
+      part_elem->engine_type= part_info->default_engine_type;
       part_elem->partition_name= default_name;
       default_name+=MAX_PART_NAME_SIZE;
-      part_info->partitions.push_back(part_elem);
     }
     else
     {
-      my_error(ER_OUTOFMEMORY, MYF(0), sizeof(partition_element));
+      mem_alloc_error(sizeof(partition_element));
       goto end;
     }
   } while (++i < no_parts);
@@ -690,14 +792,17 @@
   Set up all the default subpartitions not set-up by the user in the SQL
   statement. Also perform a number of checks that the default partitioning
   becomes an allowed partitioning scheme.
+
   SYNOPSIS
     set_up_default_subpartitions()
     part_info           The reference to all partition information
     file                A reference to a handler of the table
     max_rows            Maximum number of rows stored in the table
+
   RETURN VALUE
     TRUE                Error, attempted default values not possible
     FALSE               Ok, default partitions set-up
+
   DESCRIPTION
     The routine uses the underlying handler of the partitioning to define
     the default number of partitions. For some handlers this requires
@@ -721,7 +826,6 @@
     part_info->no_subparts= file->get_default_no_partitions(max_rows);
   no_parts= part_info->no_parts;
   no_subparts= part_info->no_subparts;
-  part_info->use_default_subpartitions= FALSE;
   if (unlikely((no_parts * no_subparts) > MAX_PARTITIONS))
   {
     my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
@@ -739,16 +843,16 @@
     do
     {
       partition_element *subpart_elem= new partition_element();
-      if (likely(subpart_elem != 0))
+      if (likely(subpart_elem != 0 &&
+          (!part_elem->subpartitions.push_back(subpart_elem))))
       {
-        subpart_elem->engine_type= NULL;
+        subpart_elem->engine_type= part_info->default_engine_type;
         subpart_elem->partition_name= name_ptr;
         name_ptr+= MAX_PART_NAME_SIZE;
-        part_elem->subpartitions.push_back(subpart_elem);
       }
       else
       {
-        my_error(ER_OUTOFMEMORY, MYF(0), sizeof(partition_element));
+        mem_alloc_error(sizeof(partition_element));
         goto end;
       }
     } while (++j < no_subparts);
@@ -760,18 +864,22 @@
 
 
 /*
-  Set up defaults for partition or subpartition (cannot set-up for both,
-  this will return an error.
+  Support routine for check_partition_info
+
   SYNOPSIS
     set_up_defaults_for_partitioning()
     part_info           The reference to all partition information
     file                A reference to a handler of the table
     max_rows            Maximum number of rows stored in the table
+    start_no            Starting partition number
+
   RETURN VALUE
     TRUE                Error, attempted default values not possible
     FALSE               Ok, default partitions set-up
+
   DESCRIPTION
-    Support routine for check_partition_info
+    Set up defaults for partition or subpartition (cannot set up for both;
+    that would return an error).
 */
 
 bool set_up_defaults_for_partitioning(partition_info *part_info,
@@ -780,11 +888,15 @@
 {
   DBUG_ENTER("set_up_defaults_for_partitioning");
 
-  if (part_info->use_default_partitions)
-    DBUG_RETURN(set_up_default_partitions(part_info, file, max_rows,
-                                          start_no));
-  if (is_sub_partitioned(part_info) && part_info->use_default_subpartitions)
-    DBUG_RETURN(set_up_default_subpartitions(part_info, file, max_rows));
+  if (!part_info->default_partitions_setup)
+  {
+    part_info->default_partitions_setup= TRUE;
+    if (part_info->use_default_partitions)
+      DBUG_RETURN(set_up_default_partitions(part_info, file, max_rows,
+                                            start_no));
+    if (is_sub_partitioned(part_info) && part_info->use_default_subpartitions)
+      DBUG_RETURN(set_up_default_subpartitions(part_info, file, max_rows));
+  }
   DBUG_RETURN(FALSE);
 }
 
@@ -792,21 +904,22 @@
 /*
   Check that all partitions use the same storage engine.
   This is currently a limitation in this version.
+
   SYNOPSIS
     check_engine_mix()
     engine_array           An array of engine identifiers
     no_parts               Total number of partitions
+
   RETURN VALUE
     TRUE                   Error, mixed engines
     FALSE                  Ok, no mixed engines
+  DESCRIPTION
+    Current check verifies only that all handlers are the same.
+    Later this check will be more sophisticated.
 */
 
 static bool check_engine_mix(handlerton **engine_array, uint no_parts)
 {
-  /*
-    Current check verifies only that all handlers are the same.
-    Later this check will be more sophisticated.
-  */
   uint i= 0;
   bool result= FALSE;
   DBUG_ENTER("check_engine_mix");
@@ -824,31 +937,35 @@
 
 
 /*
-  We will check that the partition info requested is possible to set-up in
-  this version. This routine is an extension of the parser one could say.
-  If defaults were used we will generate default data structures for all
-  partitions.
+  This code is used early in the CREATE TABLE and ALTER TABLE process.
+
   SYNOPSIS
     check_partition_info()
     part_info           The reference to all partition information
-    db_type             Default storage engine if no engine specified per
-                        partition.
     file                A reference to a handler of the table
     max_rows            Maximum number of rows stored in the table
+    out:eng_type        Return value for the engine used in the partitions
+
   RETURN VALUE
     TRUE                 Error, something went wrong
     FALSE                Ok, full partition data structures are now generated
+
   DESCRIPTION
-    This code is used early in the CREATE TABLE and ALTER TABLE process.
+    We will check that the partition info requested is possible to set-up in
+    this version. One could say this routine is an extension of the parser.
+    If defaults were used we will generate default data structures for all
+    partitions.
+
 */
 
-bool check_partition_info(partition_info *part_info,handlerton *eng_type,
+bool check_partition_info(partition_info *part_info,handlerton **eng_type,
                           handler *file, ulonglong max_rows)
 {
   handlerton **engine_array= NULL;
-  uint part_count= 0, i, no_parts, tot_partitions;
+  uint part_count= 0;
+  uint i, no_parts, tot_partitions;
   bool result= TRUE;
-  List_iterator<partition_element> part_it(part_info->partitions);
+  char *same_name;
   DBUG_ENTER("check_partition_info");
 
   if (unlikely(is_sub_partitioned(part_info) &&
@@ -868,9 +985,10 @@
     my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
     goto end;
   }
-  if (unlikely(is_partitions_in_table(part_info, part_info)))
+  if (((same_name= are_partitions_in_table(part_info,
+                                           part_info))))
   {
-    my_error(ER_SAME_NAME_PARTITION, MYF(0));
+    my_error(ER_SAME_NAME_PARTITION, MYF(0), same_name);
     goto end;
   }
   engine_array= (handlerton**)my_malloc(tot_partitions * sizeof(handlerton *), 
@@ -879,36 +997,44 @@
     goto end;
   i= 0;
   no_parts= part_info->no_parts;
-  do
   {
-    partition_element *part_elem= part_it++;
-    if (!is_sub_partitioned(part_info))
-    {
-      if (part_elem->engine_type == NULL)
-        part_elem->engine_type= eng_type;
-      DBUG_PRINT("info", ("engine = %s", part_elem->engine_type->name));
-      engine_array[part_count++]= part_elem->engine_type;
-    }
-    else
+    List_iterator<partition_element> part_it(part_info->partitions);
+    do
     {
-      uint j= 0, no_subparts= part_info->no_subparts;;
-      List_iterator<partition_element> sub_it(part_elem->subpartitions);
-      do
+      partition_element *part_elem= part_it++;
+      if (!is_sub_partitioned(part_info))
       {
-        part_elem= sub_it++;
         if (part_elem->engine_type == NULL)
-          part_elem->engine_type= eng_type;
-        DBUG_PRINT("info", ("engine = %s", part_elem->engine_type->name));
+          part_elem->engine_type= part_info->default_engine_type;
+        DBUG_PRINT("info", ("engine = %d",
+                   ha_legacy_type(part_elem->engine_type)));
         engine_array[part_count++]= part_elem->engine_type;
-      } while (++j < no_subparts);
-    }
-  } while (++i < part_info->no_parts);
+      }
+      else
+      {
+        uint j= 0, no_subparts= part_info->no_subparts;
+        List_iterator<partition_element> sub_it(part_elem->subpartitions);
+        do
+        {
+          part_elem= sub_it++;
+          if (part_elem->engine_type == NULL)
+            part_elem->engine_type= part_info->default_engine_type;
+          DBUG_PRINT("info", ("engine = %u",
+                     ha_legacy_type(part_elem->engine_type)));
+          engine_array[part_count++]= part_elem->engine_type;
+        } while (++j < no_subparts);
+      }
+    } while (++i < part_info->no_parts);
+  }
   if (unlikely(check_engine_mix(engine_array, part_count)))
   {
     my_error(ER_MIX_HANDLER_ERROR, MYF(0));
     goto end;
   }
 
+  if (eng_type)
+    *eng_type= (handlerton*)engine_array[0];
+
   /*
    We need to check that all constant expressions are of the correct
    type, that they are increasing for ranges and not overlapping for
@@ -928,51 +1054,54 @@
 
 
 /*
-  A great number of functions below here is part of the fix_partition_func
-  method. It is used to set up the partition structures for execution from
-  openfrm. It is called at the end of the openfrm when the table struct has
-  been set-up apart from the partition information.
-  It involves:
-  1) Setting arrays of fields for the partition functions.
-  2) Setting up binary search array for LIST partitioning
-  3) Setting up array for binary search for RANGE partitioning
-  4) Setting up key_map's to assist in quick evaluation whether one
-     can deduce anything from a given index of what partition to use
-  5) Checking whether a set of partitions can be derived from a range on
-     a field in the partition function.
-  As part of doing this there is also a great number of error controls.
-  This is actually the place where most of the things are checked for
-  partition information when creating a table.
-  Things that are checked includes
-  1) No NULLable fields in partition function
-  2) All fields of partition function in Primary keys and unique indexes
-     (if not supported)
-  3) No fields in partition function that are BLOB's or VARCHAR with a
-     collation other than the binary collation.
-
-
+  This method is used to set up both the partition and subpartition
+  field arrays and is used for all types of partitioning.
+  It is part of the logic around fix_partition_func.
 
-  Create an array of partition fields (NULL terminated). Before this method
-  is called fix_fields or find_table_in_sef has been called to set
-  GET_FIXED_FIELDS_FLAG on all fields that are part of the partition
-  function.
   SYNOPSIS
     set_up_field_array()
     table                TABLE object for which partition fields are set-up
     sub_part             Is the table subpartitioned as well
+
   RETURN VALUE
     TRUE                 Error, some field didn't meet requirements
     FALSE                Ok, partition field array set-up
+
   DESCRIPTION
-    This method is used to set-up both partition and subpartitioning
-    field array and used for all types of partitioning.
-    It is part of the logic around fix_partition_func.
+
+    A great number of functions below here are part of the fix_partition_func
+    method. It is used to set up the partition structures for execution from
+    openfrm. It is called at the end of the openfrm when the table struct has
+    been set-up apart from the partition information.
+    It involves:
+    1) Setting arrays of fields for the partition functions.
+    2) Setting up binary search array for LIST partitioning
+    3) Setting up array for binary search for RANGE partitioning
+    4) Setting up key_map's to assist in quick evaluation whether one
+       can deduce anything from a given index of what partition to use
+    5) Checking whether a set of partitions can be derived from a range on
+       a field in the partition function.
+    As part of doing this there is also a great number of error controls.
+    This is actually the place where most of the things are checked for
+    partition information when creating a table.
+    Things that are checked include
+    1) All fields of partition function in Primary keys and unique indexes
+       (if not supported)
+
+    Create an array of partition fields (NULL terminated). Before this method
+    is called fix_fields or find_table_in_sef has been called to set
+    GET_FIXED_FIELDS_FLAG on all fields that are part of the partition
+    function.
 */
+
 static bool set_up_field_array(TABLE *table,
-                              bool sub_part)
+                              bool is_sub_part)
 {
   Field **ptr, *field, **field_array;
-  uint no_fields= 0, size_field_array, i= 0;
+  uint no_fields= 0;
+  uint size_field_array;
+  uint i= 0;
   partition_info *part_info= table->part_info;
   int result= FALSE;
   DBUG_ENTER("set_up_field_array");
@@ -983,11 +1112,19 @@
     if (field->flags & GET_FIXED_FIELDS_FLAG)
       no_fields++;
   }
+  if (no_fields == 0)
+  {
+    /*
+      We are using the hidden key as the partitioning field
+    */
+    DBUG_ASSERT(!is_sub_part);
+    DBUG_RETURN(result);
+  }
   size_field_array= (no_fields+1)*sizeof(Field*);
   field_array= (Field**)sql_alloc(size_field_array);
   if (unlikely(!field_array))
   {
-    my_error(ER_OUTOFMEMORY, MYF(0), size_field_array);
+    mem_alloc_error(size_field_array);
     result= TRUE;
   }
   ptr= table->field;
@@ -1007,11 +1144,6 @@
           1) Not be a BLOB of any type
            A BLOB takes too long to evaluate so we don't want it for
             performance reasons.
-          2) Not be a VARCHAR other than VARCHAR with a binary collation
-            A VARCHAR with character sets can have several values being
-            equal with different number of spaces or NULL's. This is not a
-            good ground for a safe and exact partition function. Thus it is
-            not allowed in partition functions.
         */
 
         if (unlikely(field->flags & BLOB_FLAG))
@@ -1019,17 +1151,11 @@
           my_error(ER_BLOB_FIELD_IN_PART_FUNC_ERROR, MYF(0));
           result= TRUE;
         }
-        else if (unlikely((!field->flags & BINARY_FLAG) &&
-                          field->real_type() == MYSQL_TYPE_VARCHAR))
-        {
-          my_error(ER_CHAR_SET_IN_PART_FIELD_ERROR, MYF(0));
-          result= TRUE;
-        }
       }
     }
   }
   field_array[no_fields]= 0;
-  if (!sub_part)
+  if (!is_sub_part)
   {
     part_info->part_field_array= field_array;
     part_info->no_part_fields= no_fields;
@@ -1046,13 +1172,16 @@
 /*
   Create a field array including all fields of both the partitioning and the
   subpartitioning functions.
+
   SYNOPSIS
     create_full_part_field_array()
     table                TABLE object for which partition fields are set-up
     part_info            Reference to partitioning data structure
+
   RETURN VALUE
     TRUE                 Memory allocation of field array failed
     FALSE                Ok
+
   DESCRIPTION
     If there is no subpartitioning then the same array is used as for the
     partitioning. Otherwise a new array is built up using the flag
@@ -1085,7 +1214,7 @@
     field_array= (Field**)sql_alloc(size_field_array);
     if (unlikely(!field_array))
     {
-      my_error(ER_OUTOFMEMORY, MYF(0), size_field_array);
+      mem_alloc_error(size_field_array);
       result= TRUE;
       goto end;
     }
@@ -1106,21 +1235,25 @@
 
 
 /*
-  These support routines is used to set/reset an indicator of all fields
-  in a certain key. It is used in conjunction with another support routine
-  that traverse all fields in the PF to find if all or some fields in the
-  PF is part of the key. This is used to check primary keys and unique
-  keys involve all fields in PF (unless supported) and to derive the
-  key_map's used to quickly decide whether the index can be used to
-  derive which partitions are needed to scan.
-
-
 
   Clear flag GET_FIXED_FIELDS_FLAG in all fields of a key previously set by
   set_indicator_in_key_fields (always used in pairs).
+
   SYNOPSIS
     clear_indicator_in_key_fields()
     key_info                  Reference to find the key fields
+
+  RETURN VALUE
+    NONE
+
+  DESCRIPTION
+    These support routines are used to set/reset an indicator on all fields
+    in a certain key. They are used in conjunction with another support
+    routine that traverses all fields in the PF to find out whether all or
+    some fields in the PF are part of the key. This is used to check that
+    primary keys and unique keys involve all fields in the PF (unless the
+    handler supports it) and to derive the key_map's used to quickly decide
+    whether an index can be used to derive which partitions need to be
+    scanned.
 */
 
 static void clear_indicator_in_key_fields(KEY *key_info)
@@ -1134,9 +1267,13 @@
 
 /*
   Set flag GET_FIXED_FIELDS_FLAG in all fields of a key.
+
   SYNOPSIS
     set_indicator_in_key_fields
     key_info                  Reference to find the key fields
+
+  RETURN VALUE
+    NONE
 */
 
 static void set_indicator_in_key_fields(KEY *key_info)
@@ -1151,11 +1288,13 @@
 /*
   Check if all or some fields in partition field array is part of a key
   previously used to tag key fields.
+
   SYNOPSIS
     check_fields_in_PF()
     ptr                  Partition field array
-    all_fields           Is all fields of partition field array used in key
-    some_fields          Is some fields of partition field array used in key
+    out:all_fields       Are all partition fields used in the key
+    out:some_fields      Are some partition fields used in the key
+
   RETURN VALUE
     all_fields, some_fields
 */
@@ -1164,8 +1303,14 @@
                                bool *some_fields)
 {
   DBUG_ENTER("check_fields_in_PF");
+
   *all_fields= TRUE;
   *some_fields= FALSE;
+  if ((!ptr) || !(*ptr))
+  {
+    *all_fields= FALSE;
+    DBUG_VOID_RETURN;
+  }
   do
   {
   /* Check if the field of the PF is part of the current key investigated */
@@ -1181,9 +1326,13 @@
 /*
   Clear flag GET_FIXED_FIELDS_FLAG in all fields of the table.
   This routine is used for error handling purposes.
+
   SYNOPSIS
     clear_field_flag()
     table                TABLE object for which partition fields are set-up
+
+  RETURN VALUE
+    NONE
 */
 
 static void clear_field_flag(TABLE *table)
@@ -1198,35 +1347,42 @@
 
 
 /*
-  This routine sets-up the partition field array for KEY partitioning, it
-  also verifies that all fields in the list of fields is actually a part of
-  the table.
+  find_field_in_table_sef finds the field given its name. All fields get
+  GET_FIXED_FIELDS_FLAG set.
+
   SYNOPSIS
     handle_list_of_fields()
     it                   A list of field names for the partition function
     table                TABLE object for which partition fields are set-up
     part_info            Reference to partitioning data structure
     sub_part             Is the table subpartitioned as well
+
   RETURN VALUE
     TRUE                 Fields in list of fields not part of table
     FALSE                All fields ok and array created
+
   DESCRIPTION
-    find_field_in_table_sef finds the field given its name. All fields get
-    GET_FIXED_FIELDS_FLAG set.
+    This routine sets up the partition field array for KEY partitioning. It
+    also verifies that all fields in the list of fields are actually part of
+    the table.
+
 */
 
+
 static bool handle_list_of_fields(List_iterator<char> it,
                                   TABLE *table,
                                   partition_info *part_info,
-                                  bool sub_part)
+                                  bool is_sub_part)
 {
   Field *field;
   bool result;
   char *field_name;
+  bool is_list_empty= TRUE;
   DBUG_ENTER("handle_list_of_fields");
 
   while ((field_name= it++))
   {
+    is_list_empty= FALSE;
     field= find_field_in_table_sef(table, field_name);
     if (likely(field != 0))
       field->flags|= GET_FIXED_FIELDS_FLAG;
@@ -1238,19 +1394,54 @@
       goto end;
     }
   }
-  result= set_up_field_array(table, sub_part);
+  if (is_list_empty)
+  {
+    uint primary_key= table->s->primary_key;
+    if (primary_key != MAX_KEY)
+    {
+      uint no_key_parts= table->key_info[primary_key].key_parts, i;
+      /*
+        In the case of an empty list we use primary key as partition key.
+      */
+      for (i= 0; i < no_key_parts; i++)
+      {
+        Field *field= table->key_info[primary_key].key_part[i].field;
+        field->flags|= GET_FIXED_FIELDS_FLAG;
+      }
+    }
+    else
+    {
+      if (table->s->db_type->partition_flags &&
+          (table->s->db_type->partition_flags() & HA_USE_AUTO_PARTITION) &&
+          (table->s->db_type->partition_flags() & HA_CAN_PARTITION))
+      {
+        /*
+          This engine can handle automatic partitioning and there is no
+          primary key. In this case we rely on the engine to handle
+          partitioning based on a hidden key. Thus we allocate no
+          array for partitioning fields.
+        */
+        DBUG_RETURN(FALSE);
+      }
+      else
+      {
+        my_error(ER_FIELD_NOT_FOUND_PART_ERROR, MYF(0));
+        DBUG_RETURN(TRUE);
+      }
+    }
+  }
+  result= set_up_field_array(table, is_sub_part);
 end:
   DBUG_RETURN(result);
 }
 
 
 /*
-  This function is used to build an array of partition fields for the
-  partitioning function and subpartitioning function. The partitioning
-  function is an item tree that must reference at least one field in the
-  table. This is checked first in the parser that the function doesn't
-  contain non-cacheable parts (like a random function) and by checking
-  here that the function isn't a constant function.
+  The function uses a new feature in fix_fields where the flag 
+  GET_FIXED_FIELDS_FLAG is set for all fields in the item tree.
+  This flag must always be reset before returning from the function
+  since it is used for other purposes as well.
+
   SYNOPSIS
     fix_fields_part_func()
     thd                  The thread object
@@ -1258,35 +1449,38 @@
     func_expr            The item tree reference of the partition function
     part_info            Reference to partitioning data structure
     sub_part             Is the table subpartitioned as well
+
   RETURN VALUE
     TRUE                 An error occurred, something was wrong with the
                          partition function.
     FALSE                Ok, a partition field array was created
+
   DESCRIPTION
-    The function uses a new feature in fix_fields where the flag 
-    GET_FIXED_FIELDS_FLAG is set for all fields in the item tree.
-    This field must always be reset before returning from the function
-    since it is used for other purposes as well.
-*/
+    This function is used to build an array of partition fields for the
+    partitioning function and subpartitioning function. The partitioning
+    function is an item tree that must reference at least one field in the
+    table. The parser first checks that the function doesn't contain
+    non-cacheable parts (like a random function), and here we check that
+    the function isn't a constant function.
 
-static bool fix_fields_part_func(THD *thd, TABLE_LIST *tables,
-                                 Item* func_expr, partition_info *part_info,
-                                 bool sub_part)
-{
-  /*
     Calculate the number of fields in the partition function.
     Use it to allocate memory for the array of Field pointers.
     Initialise array of field pointers. Use information set when
     calling fix_fields and reset it immediately after.
     The get_fields_in_item_tree flag activates setting of a bit in flags
     on the field object.
-  */
+*/
 
+static bool fix_fields_part_func(THD *thd, TABLE_LIST *tables,
+                                 Item* func_expr, partition_info *part_info,
+                                 bool is_sub_part)
+{
   bool result= TRUE;
   TABLE *table= tables->table;
   TABLE_LIST *save_table_list, *save_first_table, *save_last_table;
   int error;
   Name_resolution_context *context;
+  const char *save_where;
   DBUG_ENTER("fix_fields_part_func");
 
   context= thd->lex->current_context();
@@ -1299,6 +1493,7 @@
   context->first_name_resolution_table= tables;
   context->last_name_resolution_table= NULL;
   func_expr->walk(&Item::change_context_processor, (byte*) context);
+  save_where= thd->where;
   thd->where= "partition function";
   error= func_expr->fix_fields(thd, (Item**)0);
   context->table_list= save_table_list;
@@ -1310,13 +1505,14 @@
     clear_field_flag(table);
     goto end;
   }
+  thd->where= save_where;
   if (unlikely(func_expr->const_item()))
   {
     my_error(ER_CONST_EXPR_IN_PARTITION_FUNC_ERROR, MYF(0));
     clear_field_flag(table);
     goto end;
   }
-  result= set_up_field_array(table, sub_part);
+  result= set_up_field_array(table, is_sub_part);
 end:
   table->get_fields_in_item_tree= FALSE;
   table->map= 0; //Restore old value
@@ -1325,24 +1521,30 @@
 
 
 /*
-  This function verifies that if there is a primary key that it contains
-  all the fields of the partition function.
-  This is a temporary limitation that will hopefully be removed after a
-  while.
+  Check that the primary key contains all partition fields if defined
+
   SYNOPSIS
     check_primary_key()
     table                TABLE object for which partition fields are set-up
+
   RETURN VALUES
    TRUE                 Not all fields in partitioning function were part
                          of primary key
     FALSE                Ok, all fields of partitioning function were part
                          of primary key
+
+  DESCRIPTION
+    This function verifies that, if there is a primary key, it contains
+    all the fields of the partition function.
+    This is a temporary limitation that will hopefully be removed after a
+    while.
 */
 
 static bool check_primary_key(TABLE *table)
 {
   uint primary_key= table->s->primary_key;
-  bool all_fields, some_fields, result= FALSE;
+  bool all_fields, some_fields;
+  bool result= FALSE;
   DBUG_ENTER("check_primary_key");
 
   if (primary_key < MAX_KEY)
@@ -1362,25 +1564,33 @@
 
 
 /*
-  This function verifies that if there is a unique index that it contains
-  all the fields of the partition function.
-  This is a temporary limitation that will hopefully be removed after a
-  while.
+  Check that unique keys contain all partition fields
+
   SYNOPSIS
     check_unique_keys()
     table                TABLE object for which partition fields are set-up
+
   RETURN VALUES
    TRUE                 Not all fields in partitioning function were part
                          of all unique keys
     FALSE                Ok, all fields of partitioning function were part
                          of unique keys
+
+  DESCRIPTION
+    This function verifies that, if there is a unique index, it contains
+    all the fields of the partition function.
+    This is a temporary limitation that will hopefully be removed after a
+    while.
 */
 
 static bool check_unique_keys(TABLE *table)
 {
-  bool all_fields, some_fields, result= FALSE;
-  uint keys= table->s->keys, i;
+  bool all_fields, some_fields;
+  bool result= FALSE;
+  uint keys= table->s->keys;
+  uint i;
   DBUG_ENTER("check_unique_keys");
+
   for (i= 0; i < keys; i++)
   {
     if (table->key_info[i].flags & HA_NOSAME) //Unique index
@@ -1444,9 +1654,11 @@
  indicating this to notify that we can also use ranges on the field
   of the PF to deduce a set of partitions if the fields of the PF were
   not all fully bound.
+
   SYNOPSIS
     check_range_capable_PF()
     table                TABLE object for which partition fields are set-up
+
   DESCRIPTION
     Support for this is not implemented yet.
 */
@@ -1454,35 +1666,76 @@
 void check_range_capable_PF(TABLE *table)
 {
   DBUG_ENTER("check_range_capable_PF");
+
   DBUG_VOID_RETURN;
 }
 
 
 /*
+  Set up partition bitmap
+
+  SYNOPSIS
+    set_up_partition_bitmap()
+    thd                  Thread object
+    part_info            Reference to partitioning data structure
+
+  RETURN VALUE
+    TRUE                 Memory allocation failure
+    FALSE                Success
+
+  DESCRIPTION
+    Allocate memory for bitmap of the partitioned table
+    and initialise it.
+*/
+
+static bool set_up_partition_bitmap(THD *thd, partition_info *part_info)
+{
+  uint32 *bitmap_buf;
+  uint bitmap_bits= part_info->no_subparts? 
+                     (part_info->no_subparts* part_info->no_parts):
+                      part_info->no_parts;
+  uint bitmap_bytes= bitmap_buffer_size(bitmap_bits);
+  DBUG_ENTER("set_up_partition_bitmap");
+
+  if (!(bitmap_buf= (uint32*)thd->alloc(bitmap_bytes)))
+  {
+    mem_alloc_error(bitmap_bytes);
+    DBUG_RETURN(TRUE);
+  }
+  bitmap_init(&part_info->used_partitions, bitmap_buf, bitmap_bytes*8, FALSE);
+  DBUG_RETURN(FALSE);
+}
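
  A quick worked example of the sizing above (illustrative only;
  bitmap_bytes_for is a made-up stand-in for bitmap_buffer_size and simply
  assumes rounding up to whole 32-bit words):

    /* One bit per (sub)partition: a table with 4 partitions and 2
       subpartitions per partition needs 4 * 2 = 8 bits; without
       subpartitioning it needs just 4 bits. */
    static unsigned int bitmap_bytes_for(unsigned int bits)
    {
      return ((bits + 31) / 32) * 4;   /* 8 bits -> 4 bytes (one word) */
    }
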
+
+
+/*
   Set up partition key maps
+
   SYNOPSIS
     set_up_partition_key_maps()
     table                TABLE object for which partition fields are set-up
     part_info            Reference to partitioning data structure
+
   RETURN VALUES
     None
+
   DESCRIPTION
-  This function sets up a couple of key maps to be able to quickly check
-  if an index ever can be used to deduce the partition fields or even
-  a part of the fields of the  partition function.
-  We set up the following key_map's.
-  PF = Partition Function
-  1) All fields of the PF is set even by equal on the first fields in the
-     key
-  2) All fields of the PF is set if all fields of the key is set
-  3) At least one field in the PF is set if all fields is set
-  4) At least one field in the PF is part of the key
+    This function sets up a couple of key maps to be able to quickly check
+    whether an index can ever be used to deduce the partition fields, or even
+    a part of the fields of the partition function.
+    We set up the following key_map's.
+    PF = Partition Function
+    1) All fields of the PF are set even by equality on the first fields in
+       the key
+    2) All fields of the PF are set if all fields of the key are set
+    3) At least one field in the PF is set if all fields are set
+    4) At least one field in the PF is part of the key
 */
 
 static void set_up_partition_key_maps(TABLE *table,
                                       partition_info *part_info)
 {
-  uint keys= table->s->keys, i;
+  uint keys= table->s->keys;
+  uint i;
   bool all_fields, some_fields;
   DBUG_ENTER("set_up_partition_key_maps");
 
@@ -1517,17 +1770,26 @@
 
 
 /*
-  Set-up all function pointers for calculation of partition id,
-  subpartition id and the upper part in subpartitioning. This is to speed up
-  execution of get_partition_id which is executed once every record to be
-  written and deleted and twice for updates.
+  Set up function pointers for partition function
+
   SYNOPSIS
-    set_up_partition_function_pointers()
+    set_up_partition_func_pointers()
     part_info            Reference to partitioning data structure
+
+  RETURN VALUE
+    NONE
+
+  DESCRIPTION
+    Set-up all function pointers for calculation of partition id,
+    subpartition id and the upper part in subpartitioning. This is to speed up
+    execution of get_partition_id which is executed once every record to be
+    written and deleted and twice for updates.
 */
 
 static void set_up_partition_func_pointers(partition_info *part_info)
 {
+  DBUG_ENTER("set_up_partition_func_pointers");
+
   if (is_sub_partitioned(part_info))
   {
     if (part_info->part_type == RANGE_PARTITION)
@@ -1560,7 +1822,7 @@
         }
       }
     }
-    else //LIST Partitioning
+    else /* LIST Partitioning */
     {
       part_info->get_part_partition_id= get_partition_id_list;
       if (part_info->list_of_subpart_fields)
@@ -1591,7 +1853,7 @@
       }
     }
   }
-  else //No subpartitioning
+  else /* No subpartitioning */
   {
     part_info->get_part_partition_id= NULL;
     part_info->get_subpartition_id= NULL;
@@ -1599,7 +1861,7 @@
       part_info->get_partition_id= get_partition_id_range;
     else if (part_info->part_type == LIST_PARTITION)
       part_info->get_partition_id= get_partition_id_list;
-    else //HASH partitioning
+    else /* HASH partitioning */
     {
       if (part_info->list_of_part_fields)
       {
@@ -1617,21 +1879,27 @@
       }
     }
   }
+  DBUG_VOID_RETURN;
 }
 
 
 /*
  For linear hashing we need a mask which is of the form 2**n - 1 where
   2**n >= no_parts. Thus if no_parts is 6 then mask is 2**3 - 1 = 8 - 1 = 7.
+
   SYNOPSIS
     set_linear_hash_mask()
     part_info            Reference to partitioning data structure
     no_parts             Number of parts in linear hash partitioning
+
+  RETURN VALUE
+    NONE
 */
 
 static void set_linear_hash_mask(partition_info *part_info, uint no_parts)
 {
   uint mask;
+
   for (mask= 1; mask < no_parts; mask<<=1)
     ;
   part_info->linear_hash_mask= mask - 1;
@@ -1641,13 +1909,16 @@
 /*
  This function calculates the partition id given the result of the hash
   function using linear hashing parameters, mask and number of partitions.
+
   SYNOPSIS
     get_part_id_from_linear_hash()
     hash_value          Hash value calculated by HASH function or KEY function
     mask                Mask calculated previously by set_linear_hash_mask
     no_parts            Number of partitions in HASH partitioned part
+
   RETURN VALUE
     part_id             The calculated partition identity (starting at 0)
+
   DESCRIPTION
     The partition is calculated according to the theory of linear hashing.
     See e.g. Linear hashing: a new tool for file and table addressing,
@@ -1659,10 +1930,11 @@
                                            uint no_parts)
 {
   uint32 part_id= (uint32)(hash_value & mask);
+
   if (part_id >= no_parts)
   {
     uint new_mask= ((mask + 1) >> 1) - 1;
-    part_id= hash_value & new_mask;
+    part_id= (uint32)(hash_value & new_mask);
   }
   return part_id;
 }
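
  A small worked example of the fold-back rule above (standalone sketch;
  linear_hash_part is a made-up name): with no_parts = 6 the mask set up by
  set_linear_hash_mask is 7, so the masked hash ranges over 0..7; the two
  values that exceed no_parts are folded back with the halved mask 3.

    static unsigned int linear_hash_part(unsigned long long hash_value,
                                         unsigned int mask,
                                         unsigned int no_parts)
    {
      unsigned int part_id= (unsigned int)(hash_value & mask);
      if (part_id >= no_parts)
        part_id= (unsigned int)(hash_value & (((mask + 1) >> 1) - 1));
      return part_id;
    }

    /* mask = 7, no_parts = 6: hash 5 -> 5, hash 6 -> 6 & 3 = 2, hash 7 -> 3 */
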
@@ -1675,10 +1947,12 @@
     thd                  The thread object
     name                 The name of the partitioned table
     table                TABLE object for which partition fields are set-up
+    is_create_table_ind  Indicator of whether openfrm was called as part of
+                         CREATE or ALTER TABLE
 
   RETURN VALUE
-    TRUE
-    FALSE
+    TRUE                 Error
+    FALSE                Success
 
   DESCRIPTION
     The name parameter contains the full table name and is used to get the
@@ -1693,7 +1967,8 @@
     of an error that is not discovered until here.
 */
 
-bool fix_partition_func(THD *thd, const char *name, TABLE *table)
+bool fix_partition_func(THD *thd, const char* name, TABLE *table,
+                        bool is_create_table_ind)
 {
   bool result= TRUE;
   uint dir_length, home_dir_length;
@@ -1705,6 +1980,10 @@
   ulong save_set_query_id= thd->set_query_id;
   DBUG_ENTER("fix_partition_func");
 
+  if (part_info->fixed)
+  {
+    DBUG_RETURN(FALSE);
+  }
   thd->set_query_id= 0;
   /*
     Set-up the TABLE_LIST object to be a list with a single table
@@ -1724,6 +2003,13 @@
   db_name= &db_name_string[home_dir_length];
   tables.db= db_name;
 
+  if (!is_create_table_ind)
+  {
+    if (partition_default_handling(table, part_info))
+    {
+      DBUG_RETURN(TRUE);
+    }
+  }
   if (is_sub_partitioned(part_info))
   {
     DBUG_ASSERT(part_info->subpart_type == HASH_PARTITION);
@@ -1786,13 +2072,13 @@
     const char *error_str;
     if (part_info->part_type == RANGE_PARTITION)
     {
-      error_str= range_str; 
+      error_str= partition_keywords[PKW_RANGE].str; 
       if (unlikely(check_range_constants(part_info)))
         goto end;
     }
     else if (part_info->part_type == LIST_PARTITION)
     {
-      error_str= list_str; 
+      error_str= partition_keywords[PKW_LIST].str; 
       if (unlikely(check_list_constants(part_info)))
         goto end;
     }
@@ -1820,12 +2106,16 @@
     goto end;
   if (unlikely(check_primary_key(table)))
     goto end;
-  if (unlikely((!table->file->partition_flags() & HA_CAN_PARTITION_UNIQUE) &&
+  if (unlikely((!(table->s->db_type->partition_flags &&
+      (table->s->db_type->partition_flags() & HA_CAN_PARTITION_UNIQUE))) &&
                check_unique_keys(table)))
     goto end;
+  if (unlikely(set_up_partition_bitmap(thd, part_info)))
+    goto end;
   check_range_capable_PF(table);
   set_up_partition_key_maps(table, part_info);
   set_up_partition_func_pointers(part_info);
+  part_info->fixed= TRUE;
   set_up_range_analysis_info(part_info);
   result= FALSE;
 end:
@@ -1845,6 +2135,7 @@
 static int add_write(File fptr, const char *buf, uint len)
 {
   uint len_written= my_write(fptr, (const byte*)buf, len, MYF(0));
+
   if (likely(len == len_written))
     return 0;
   else
@@ -1889,13 +2180,14 @@
 static int add_part_key_word(File fptr, const char *key_string)
 {
   int err= add_string(fptr, key_string);
+
   err+= add_space(fptr);
   return err + add_begin_parenthesis(fptr);
 }
 
 static int add_hash(File fptr)
 {
-  return add_part_key_word(fptr, hash_str);
+  return add_part_key_word(fptr, partition_keywords[PKW_HASH].str);
 }
 
 static int add_partition(File fptr)
@@ -1907,6 +2199,7 @@
 static int add_subpartition(File fptr)
 {
   int err= add_string(fptr, sub_str);
+
   return err + add_partition(fptr);
 }
 
@@ -1919,6 +2212,7 @@
 static int add_subpartition_by(File fptr)
 {
   int err= add_string(fptr, sub_str);
+
   return err + add_partition_by(fptr);
 }
 
@@ -1926,17 +2220,19 @@
 {
   uint i, no_fields;
   int err;
+
   List_iterator<char> part_it(field_list);
-  err= add_part_key_word(fptr, key_str);
+  err= add_part_key_word(fptr, partition_keywords[PKW_KEY].str);
   no_fields= field_list.elements;
   i= 0;
-  do
+  while (i < no_fields)
   {
     const char *field_str= part_it++;
     err+= add_string(fptr, field_str);
     if (i != (no_fields-1))
       err+= add_comma(fptr);
-  } while (++i < no_fields);
+    i++;
+  }
   return err;
 }
 
@@ -1950,6 +2246,7 @@
                               const char *keystr)
 {
   int err= add_string(fptr, keyword);
+
   err+= add_space(fptr);
   err+= add_equal(fptr);
   err+= add_space(fptr);
@@ -1960,6 +2257,7 @@
 static int add_keyword_int(File fptr, const char *keyword, longlong num)
 {
   int err= add_string(fptr, keyword);
+
   err+= add_space(fptr);
   err+= add_equal(fptr);
   err+= add_space(fptr);
@@ -1970,14 +2268,15 @@
 static int add_engine(File fptr, handlerton *engine_type)
 {
   const char *engine_str= engine_type->name;
+  DBUG_PRINT("info", ("ENGINE = %s", engine_str));
   int err= add_string(fptr, "ENGINE = ");
   return err + add_string(fptr, engine_str);
-  return err;
 }
 
 static int add_partition_options(File fptr, partition_element *p_elem)
 {
   int err= 0;
+
   if (p_elem->tablespace_name)
     err+= add_keyword_string(fptr,"TABLESPACE",p_elem->tablespace_name);
   if (p_elem->nodegroup_id != UNDEF_NODEGROUP)
@@ -1999,6 +2298,7 @@
                          partition_element *p_elem)
 {
   int err= 0;
+
   if (part_info->part_type == RANGE_PARTITION)
   {
     err+= add_string(fptr, "VALUES LESS THAN ");
@@ -2009,7 +2309,7 @@
       err+= add_end_parenthesis(fptr);
     }
     else
-      err+= add_string(fptr, "MAXVALUE");
+      err+= add_string(fptr, partition_keywords[PKW_MAXVALUE].str);
   }
   else if (part_info->part_type == LIST_PARTITION)
   {
@@ -2035,16 +2335,19 @@
   Generate the partition syntax from the partition data structure.
  Useful for support of generating defaults, SHOW CREATE TABLE
   and easy partition management.
+
   SYNOPSIS
     generate_partition_syntax()
     part_info                  The partitioning data structure
     buf_length                 A pointer to the returned buffer length
     use_sql_alloc              Allocate buffer from sql_alloc if true
                                otherwise use my_malloc
-    add_default_info           Add info generated by default
+    write_all                  Write everything, including default values
+
   RETURN VALUES
     NULL error
     buf, buf_length            Buffer and its length
+
   DESCRIPTION
   Here we will generate the full syntax for the given command where all
  defaults have been expanded. By doing so it is also possible to
@@ -2068,44 +2371,42 @@
 char *generate_partition_syntax(partition_info *part_info,
                                 uint *buf_length,
                                 bool use_sql_alloc,
-				bool add_default_info)
+                                bool write_all)
 {
-  uint i,j, no_parts, no_subparts;
+  uint i,j, tot_no_parts, no_subparts, no_parts;
   partition_element *part_elem;
+  partition_element *save_part_elem= NULL;
   ulonglong buffer_length;
   char path[FN_REFLEN];
   int err= 0;
-  DBUG_ENTER("generate_partition_syntax");
+  List_iterator<partition_element> part_it(part_info->partitions);
+  List_iterator<partition_element> temp_it(part_info->temp_partitions);
   File fptr;
   char *buf= NULL; //Return buffer
-  const char *file_name;
+  uint use_temp= 0;
+  uint no_temp_parts= part_info->temp_partitions.elements;
+  bool write_part_state;
+  DBUG_ENTER("generate_partition_syntax");
 
-  sprintf(path, "%s_%lx_%lx", "part_syntax", current_pid,
-          current_thd->thread_id);
-  fn_format(path,path,mysql_tmpdir,".psy", MY_REPLACE_EXT);
-  file_name= &path[0];
-  DBUG_PRINT("info", ("File name = %s", file_name));
-  if (unlikely(((fptr= my_open(file_name,O_CREAT|O_RDWR, MYF(MY_WME))) == -1)))
+  write_part_state= (part_info->part_state && !part_info->part_state_len);
+  if (unlikely(((fptr= create_temp_file(path,mysql_tmpdir,"psy", 0,0))) < 0))
     DBUG_RETURN(NULL);
-#if defined(MSDOS) || defined(__WIN__) || defined(__EMX__) || defined(OS2)
-#else
-  my_delete(file_name, MYF(0));
+#ifndef __WIN__
+  unlink(path);
 #endif
   err+= add_space(fptr);
   err+= add_partition_by(fptr);
   switch (part_info->part_type)
   {
     case RANGE_PARTITION:
-      add_default_info= TRUE;
-      err+= add_part_key_word(fptr, range_str);
+      err+= add_part_key_word(fptr, partition_keywords[PKW_RANGE].str);
       break;
     case LIST_PARTITION:
-      add_default_info= TRUE;
-      err+= add_part_key_word(fptr, list_str);
+      err+= add_part_key_word(fptr, partition_keywords[PKW_LIST].str);
       break;
     case HASH_PARTITION:
       if (part_info->linear_hash_ind)
-        err+= add_string(fptr, "LINEAR ");
+        err+= add_string(fptr, partition_keywords[PKW_LINEAR].str);
       if (part_info->list_of_part_fields)
         err+= add_key_partition(fptr, part_info->part_field_list);
       else
@@ -2122,6 +2423,13 @@
                          part_info->part_func_len);
   err+= add_end_parenthesis(fptr);
   err+= add_space(fptr);
+  if ((!part_info->use_default_no_partitions) &&
+       part_info->use_default_partitions)
+  {
+    err+= add_string(fptr, "PARTITIONS ");
+    err+= add_int(fptr, part_info->no_parts);
+    err+= add_space(fptr);
+  }
   if (is_sub_partitioned(part_info))
   {
     err+= add_subpartition_by(fptr);
@@ -2135,53 +2443,114 @@
                            part_info->subpart_func_len);
     err+= add_end_parenthesis(fptr);
     err+= add_space(fptr);
+    if ((!part_info->use_default_no_subpartitions) && 
+          part_info->use_default_subpartitions)
+    {
+      err+= add_string(fptr, "SUBPARTITIONS ");
+      err+= add_int(fptr, part_info->no_subparts);
+      err+= add_space(fptr);
+    }
   }
-  if (add_default_info)
-  {
-  err+= add_begin_parenthesis(fptr);
-  List_iterator<partition_element> part_it(part_info->partitions);
   no_parts= part_info->no_parts;
+  tot_no_parts= no_parts + no_temp_parts;
   no_subparts= part_info->no_subparts;
-  i= 0;
-  do
+
+  if (write_all || (!part_info->use_default_partitions))
   {
-    part_elem= part_it++;
-    err+= add_partition(fptr);
-    err+= add_string(fptr, part_elem->partition_name);
-    err+= add_space(fptr);
-    err+= add_partition_values(fptr, part_info, part_elem);
-    if (!is_sub_partitioned(part_info))
-      err+= add_partition_options(fptr, part_elem);
-    if (is_sub_partitioned(part_info))
+    err+= add_begin_parenthesis(fptr);
+    i= 0;
+    do
     {
-      err+= add_space(fptr);
-      err+= add_begin_parenthesis(fptr);
-      List_iterator<partition_element> sub_it(part_elem->subpartitions);
-      j= 0;
-      do
+      /*
+        We need to do some clever list manipulation here since we have two
+        different needs for our list processing, and here we pay some of the
+        cost of keeping the list processing simpler in the other parts of the
+        code.
+
+        For ALTER TABLE REORGANIZE PARTITIONS the main list holds the
+        partitions as they will look after the reorganisation, while the
+        reorganised partitions are kept in the temporary partition list. Thus
+        when we find the first added partition we insert the temporary list,
+        if there is such a list. If there is no temporary list we are
+        performing an ADD PARTITION.
+      */
+      if (use_temp && use_temp <= no_temp_parts)
+      {
+        part_elem= temp_it++;
+        DBUG_ASSERT(no_temp_parts);
+        no_temp_parts--;
+      }
+      else if (use_temp)
+      {
+        DBUG_ASSERT(no_parts);
+        part_elem= save_part_elem;
+        use_temp= 0;
+        no_parts--;
+      }
+      else
       {
-        part_elem= sub_it++;
-        err+= add_subpartition(fptr);
+        part_elem= part_it++;
+        if ((part_elem->part_state == PART_TO_BE_ADDED ||
+             part_elem->part_state == PART_IS_ADDED) && no_temp_parts)
+        {
+          save_part_elem= part_elem;
+          part_elem= temp_it++;
+          no_temp_parts--;
+          use_temp= 1;
+        }
+        else
+        {
+          DBUG_ASSERT(no_parts);
+          no_parts--;
+        }
+      }
+
+      if (part_elem->part_state != PART_IS_DROPPED)
+      {
+        if (write_part_state)
+        {
+          uint32 part_state_id= part_info->part_state_len;
+          part_info->part_state[part_state_id]= (uchar)part_elem->part_state;
+          part_info->part_state_len= part_state_id+1;
+        }
+        err+= add_partition(fptr);
         err+= add_string(fptr, part_elem->partition_name);
         err+= add_space(fptr);
-        err+= add_partition_options(fptr, part_elem);
-        if (j != (no_subparts-1))
+        err+= add_partition_values(fptr, part_info, part_elem);
+        if (!is_sub_partitioned(part_info))
+          err+= add_partition_options(fptr, part_elem);
+        if (is_sub_partitioned(part_info) &&
+            (write_all || (!part_info->use_default_subpartitions)))
+        {
+          err+= add_space(fptr);
+          err+= add_begin_parenthesis(fptr);
+          List_iterator<partition_element> sub_it(part_elem->subpartitions);
+          j= 0;
+          do
+          {
+            part_elem= sub_it++;
+            err+= add_subpartition(fptr);
+            err+= add_string(fptr, part_elem->partition_name);
+            err+= add_space(fptr);
+            err+= add_partition_options(fptr, part_elem);
+            if (j != (no_subparts-1))
+            {
+              err+= add_comma(fptr);
+              err+= add_space(fptr);
+            }
+            else
+              err+= add_end_parenthesis(fptr);
+          } while (++j < no_subparts);
+        }
+        if (i != (tot_no_parts-1))
         {
           err+= add_comma(fptr);
           err+= add_space(fptr);
         }
-        else
-          err+= add_end_parenthesis(fptr);
-      } while (++j < no_subparts);
-    }
-    if (i != (no_parts-1))
-    {
-      err+= add_comma(fptr);
-      err+= add_space(fptr);
-    }
-    else
-      err+= add_end_parenthesis(fptr);
-  } while (++i < no_parts);
+      }
+      if (i == (tot_no_parts-1))
+        err+= add_end_parenthesis(fptr);
+    } while (++i < tot_no_parts);
+    DBUG_ASSERT(!no_parts && !no_temp_parts);
   }
   if (err)
     goto close_file;
@@ -2209,19 +2578,7 @@
     buf[*buf_length]= 0;
 
 close_file:
-  /*
-    Delete the file before closing to ensure the file doesn't get synched
-    to disk unnecessary. We only used the file system as a dynamic array
-    implementation so we are not really interested in getting the file
-    present on disk.
-    This is not possible on Windows so here it has to be done after closing
-    the file. Also on Unix we delete immediately after opening to ensure no
-    other process can read the information written into the file.
-  */
   my_close(fptr, MYF(0));
-#if defined(MSDOS) || defined(__WIN__) || defined(__EMX__) || defined(OS2)
-  my_delete(file_name, MYF(0));
-#endif
   DBUG_RETURN(buf);
 }
 
@@ -2229,10 +2586,12 @@
 /*
   Check if partition key fields are modified and if it can be handled by the
   underlying storage engine.
+
   SYNOPSIS
     partition_key_modified
     table                TABLE object for which partition fields are set-up
    fields               A list of the fields to be modified
+
   RETURN VALUES
     TRUE                 Need special handling of UPDATE
     FALSE                Normal UPDATE handling is ok
@@ -2244,9 +2603,11 @@
   partition_info *part_info= table->part_info;
   Item_field *item_field;
   DBUG_ENTER("partition_key_modified");
+
   if (!part_info)
     DBUG_RETURN(FALSE);
-  if (table->file->partition_flags() & HA_CAN_UPDATE_PARTITION_KEY)
+  if (table->s->db_type->partition_flags &&
+      (table->s->db_type->partition_flags() & HA_CAN_UPDATE_PARTITION_KEY))
     DBUG_RETURN(FALSE);
   f.rewind();
   while ((item_field=(Item_field*) f++))
@@ -2276,11 +2637,14 @@
 
 /*
   Calculate hash value for KEY partitioning using an array of fields.
+
   SYNOPSIS
     calculate_key_value()
     field_array             An array of the fields in KEY partitioning
+
   RETURN VALUE
     hash_value calculated
+
   DESCRIPTION
     Uses the hash function on the character set of the field. Integer and
     floating point fields use the binary character set by default.
@@ -2290,6 +2654,7 @@
 {
   uint32 hashnr= 0;
   ulong nr2= 4;
+
   do
   {
     Field *field= *field_array;
@@ -2313,6 +2678,7 @@
 /*
   A simple support function to calculate part_id given local part and
   sub part.
+
   SYNOPSIS
     get_part_id_for_sub()
     loc_part_id             Local partition id
@@ -2330,31 +2696,40 @@
 
 /*
   Calculate part_id for (SUB)PARTITION BY HASH
+
   SYNOPSIS
     get_part_id_hash()
     no_parts                 Number of hash partitions
     part_expr                Item tree of hash function
+    out:func_value      Value of hash function
+
   RETURN VALUE
     Calculated partition id
 */
 
 inline
 static uint32 get_part_id_hash(uint no_parts,
-                               Item *part_expr)
+                               Item *part_expr,
+                               longlong *func_value)
 {
   DBUG_ENTER("get_part_id_hash");
-  DBUG_RETURN((uint32)(part_expr->val_int() % no_parts));
+  *func_value= part_expr->val_int();
+  longlong int_hash_id= *func_value % no_parts;
+  DBUG_RETURN(int_hash_id < 0 ? -int_hash_id : int_hash_id);
 }
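
  The sign handling above matters because the partition expression may
  evaluate to a negative value, and the remainder of a negative longlong
  typically keeps the sign of the dividend (truncating division). A
  standalone illustration (hash_part_id is a made-up name):

    static unsigned int hash_part_id(long long func_value,
                                     unsigned int no_parts)
    {
      long long id= func_value % (long long)no_parts;
      return (unsigned int)(id < 0 ? -id : id);
    }

    /* no_parts = 4: func_value 10 -> 2, func_value -10 -> -(-2) = 2 */
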
 
 
 /*
   Calculate part_id for (SUB)PARTITION BY LINEAR HASH
+
   SYNOPSIS
     get_part_id_linear_hash()
     part_info           A reference to the partition_info struct where all the
                         desired information is given
     no_parts            Number of hash partitions
     part_expr           Item tree of hash function
+    out:func_value      Value of hash function
+
   RETURN VALUE
     Calculated partition id
 */
@@ -2362,10 +2737,13 @@
 inline
 static uint32 get_part_id_linear_hash(partition_info *part_info,
                                       uint no_parts,
-                                      Item *part_expr)
+                                      Item *part_expr,
+                                      longlong *func_value)
 {
   DBUG_ENTER("get_part_id_linear_hash");
-  DBUG_RETURN(get_part_id_from_linear_hash(part_expr->val_int(),
+
+  *func_value= part_expr->val_int();
+  DBUG_RETURN(get_part_id_from_linear_hash(*func_value,
                                            part_info->linear_hash_mask,
                                            no_parts));
 }
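get_part_id_from_linear_hash() is defined earlier in the file and is outside
this hunk. As a rough sketch of the LINEAR HASH idea it relies on, assuming
linear_hash_mask is 2^n - 1 for the smallest power of two 2^n >= no_parts (an
assumption of this illustration, not a claim about the exact implementation):

    #include <cstdint>

    /* Illustrative linear hash mapping: mask with the next power of two,
       then fold once if the result lands outside the existing partitions. */
    static uint32_t linear_hash_part_id(long long hash_value, uint32_t mask,
                                        unsigned no_parts)
    {
      uint32_t part_id= (uint32_t) ((unsigned long long) hash_value & mask);
      if (part_id >= no_parts)
        part_id&= mask >> 1;   /* one fold suffices: (mask >> 1) < no_parts */
      return part_id;
    }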
@@ -2373,31 +2751,37 @@
 
 /*
   Calculate part_id for (SUB)PARTITION BY KEY
+
   SYNOPSIS
     get_part_id_key()
    field_array         Array of fields for PARTITION KEY
     no_parts            Number of KEY partitions
+
   RETURN VALUE
     Calculated partition id
 */
 
 inline
 static uint32 get_part_id_key(Field **field_array,
-                              uint no_parts)
+                              uint no_parts,
+                              longlong *func_value)
 {
   DBUG_ENTER("get_part_id_key");
-  DBUG_RETURN(calculate_key_value(field_array) % no_parts);
+  *func_value= calculate_key_value(field_array);
+  DBUG_RETURN(*func_value % no_parts);
 }
 
 
 /*
   Calculate part_id for (SUB)PARTITION BY LINEAR KEY
+
   SYNOPSIS
     get_part_id_linear_key()
     part_info           A reference to the partition_info struct where all the
                         desired information is given
    field_array         Array of fields for PARTITION KEY
     no_parts            Number of KEY partitions
+
   RETURN VALUE
     Calculated partition id
 */
@@ -2405,10 +2789,13 @@
 inline
 static uint32 get_part_id_linear_key(partition_info *part_info,
                                      Field **field_array,
-                                     uint no_parts)
+                                     uint no_parts,
+                                     longlong *func_value)
 {
   DBUG_ENTER("get_partition_id_linear_key");
-  DBUG_RETURN(get_part_id_from_linear_hash(calculate_key_value(field_array),
+
+  *func_value= calculate_key_value(field_array);
+  DBUG_RETURN(get_part_id_from_linear_hash(*func_value,
                                            part_info->linear_hash_mask,
                                            no_parts));
 }
@@ -2417,15 +2804,18 @@
   This function is used to calculate the partition id where all partition
   fields have been prepared to point to a record where the partition field
   values are bound.
+
   SYNOPSIS
     get_partition_id()
     part_info           A reference to the partition_info struct where all the
                         desired information is given
-    part_id             The partition id is returned through this pointer
+    out:part_id         The partition id is returned through this pointer
+
   RETURN VALUE
     part_id
     return TRUE means that the fields of the partition function didn't fit
     into any partition and thus the values of the PF-fields are not allowed.
+
   DESCRIPTION
     A routine used from write_row, update_row and delete_row from any
     handler supporting partitioning. It is also a support routine for
@@ -2455,15 +2845,18 @@
   This function is used to calculate the main partition to use in the case of
   subpartitioning and we don't know enough to get the partition identity in
   total.
+
   SYNOPSIS
     get_part_partition_id()
     part_info           A reference to the partition_info struct where all the
                         desired information is given
-    part_id             The partition id is returned through this pointer
+    out:part_id         The partition id is returned through this pointer
+
   RETURN VALUE
     part_id
     return TRUE means that the fields of the partition function didn't fit
     into any partition and thus the values of the PF-fields are not allowed.
+
   DESCRIPTION
     
     It is actually 6 different variants of this function which are called
@@ -2478,15 +2871,19 @@
 */
 
 
-bool get_partition_id_list(partition_info *part_info,
-                           uint32 *part_id)
+int get_partition_id_list(partition_info *part_info,
+                           uint32 *part_id,
+                           longlong *func_value)
 {
-  DBUG_ENTER("get_partition_id_list");
   LIST_PART_ENTRY *list_array= part_info->list_array;
-  uint list_index;
+  int list_index;
   longlong list_value;
-  uint min_list_index= 0, max_list_index= part_info->no_list_values - 1;
+  int min_list_index= 0;
+  int max_list_index= part_info->no_list_values - 1;
   longlong part_func_value= part_info->part_expr->val_int();
+  DBUG_ENTER("get_partition_id_list");
+
+  *func_value= part_func_value;
   while (max_list_index >= min_list_index)
   {
     list_index= (max_list_index + min_list_index) >> 1;
@@ -2502,12 +2899,12 @@
     else
     {
       *part_id= (uint32)list_array[list_index].partition_id;
-      DBUG_RETURN(FALSE);
+      DBUG_RETURN(0);
     }
   }
 notfound:
   *part_id= 0;
-  DBUG_RETURN(TRUE);
+  DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);
 }
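The switch from bool to int seen here is the convention this patch applies to
the whole get_partition_id_* family: 0 means success, HA_ERR_NO_PARTITION_FOUND
means the value maps to no partition, and the raw partition function value is
reported through the new func_value out parameter. A hedged caller sketch (the
helper name is illustrative, the types are the server's):

    /* Illustrative caller of the new convention, not code from the patch. */
    static int locate_row_partition(partition_info *part_info, uint32 *part_id)
    {
      longlong func_value;                   /* raw partition function value */
      int error= part_info->get_partition_id(part_info, part_id, &func_value);
      if (error)
        return error;                        /* e.g. HA_ERR_NO_PARTITION_FOUND */
      return 0;                              /* *part_id is valid here */
    }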
 
 
@@ -2584,14 +2981,18 @@
 }
 
 
-bool get_partition_id_range(partition_info *part_info,
-                            uint32 *part_id)
+int get_partition_id_range(partition_info *part_info,
+                            uint32 *part_id,
+                            longlong *func_value)
 {
-  DBUG_ENTER("get_partition_id_int_range");
   longlong *range_array= part_info->range_int_array;
   uint max_partition= part_info->no_parts - 1;
-  uint min_part_id= 0, max_part_id= max_partition, loc_part_id;
+  uint min_part_id= 0;
+  uint max_part_id= max_partition;
+  uint loc_part_id;
   longlong part_func_value= part_info->part_expr->val_int();
+  DBUG_ENTER("get_partition_id_int_range");
+
   while (max_part_id > min_part_id)
   {
     loc_part_id= (max_part_id + min_part_id + 1) >> 1;
@@ -2605,11 +3006,12 @@
     if (loc_part_id != max_partition)
       loc_part_id++;
   *part_id= (uint32)loc_part_id;
+  *func_value= part_func_value;
   if (loc_part_id == max_partition)
     if (range_array[loc_part_id] != LONGLONG_MAX)
       if (part_func_value >= range_array[loc_part_id])
-        DBUG_RETURN(TRUE);
-  DBUG_RETURN(FALSE);
+        DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);
+  DBUG_RETURN(0);
 }
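To make the boundary handling at the end of get_partition_id_range concrete:
range_int_array holds the upper bounds of the RANGE partitions in ascending
order, with LONGLONG_MAX standing in for MAXVALUE, and only a value at or
beyond the last finite bound is rejected. A self-contained sketch of the same
lookup with a simplified binary search and standard types (not the patch's
code):

    #include <climits>

    /* bounds[i] is the exclusive upper bound of partition i (VALUES LESS THAN),
       with LLONG_MAX standing in for MAXVALUE.  Returns 0 and sets *part_id,
       or non-zero when no partition covers the value. */
    static int range_part_id(const long long *bounds, unsigned no_parts,
                             long long value, unsigned *part_id)
    {
      unsigned lo= 0, hi= no_parts - 1;
      while (hi > lo)
      {
        unsigned mid= (lo + hi + 1) / 2;
        if (bounds[mid - 1] <= value)
          lo= mid;               /* value is at or above the lower boundary */
        else
          hi= mid - 1;
      }
      *part_id= lo;
      if (lo == no_parts - 1 && bounds[lo] != LLONG_MAX && value >= bounds[lo])
        return 1;                /* corresponds to HA_ERR_NO_PARTITION_FOUND */
      return 0;
    }

With bounds {10, 20, LLONG_MAX} a value of 15 lands in partition 1 and 25 lands
in the MAXVALUE partition 2; without the MAXVALUE entry, 25 would be rejected.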
 
 
@@ -2695,191 +3097,246 @@
 }
 
 
-bool get_partition_id_hash_nosub(partition_info *part_info,
-                                 uint32 *part_id)
+int get_partition_id_hash_nosub(partition_info *part_info,
+                                 uint32 *part_id,
+                                 longlong *func_value)
 {
-  *part_id= get_part_id_hash(part_info->no_parts, part_info->part_expr);
-  return FALSE;
+  *part_id= get_part_id_hash(part_info->no_parts, part_info->part_expr,
+                             func_value);
+  return 0;
 }
 
 
-bool get_partition_id_linear_hash_nosub(partition_info *part_info,
-                                        uint32 *part_id)
+int get_partition_id_linear_hash_nosub(partition_info *part_info,
+                                        uint32 *part_id,
+                                        longlong *func_value)
 {
   *part_id= get_part_id_linear_hash(part_info, part_info->no_parts,
-                                    part_info->part_expr);
-  return FALSE;
+                                    part_info->part_expr, func_value);
+  return 0;
 }
 
 
-bool get_partition_id_key_nosub(partition_info *part_info,
-                                uint32 *part_id)
+int get_partition_id_key_nosub(partition_info *part_info,
+                                uint32 *part_id,
+                                longlong *func_value)
 {
-  *part_id= get_part_id_key(part_info->part_field_array, part_info->no_parts);
-  return FALSE;
+  *part_id= get_part_id_key(part_info->part_field_array,
+                            part_info->no_parts, func_value);
+  return 0;
 }
 
 
-bool get_partition_id_linear_key_nosub(partition_info *part_info,
-                                       uint32 *part_id)
+int get_partition_id_linear_key_nosub(partition_info *part_info,
+                                       uint32 *part_id,
+                                       longlong *func_value)
 {
   *part_id= get_part_id_linear_key(part_info,
                                    part_info->part_field_array,
-                                   part_info->no_parts);
-  return FALSE;
+                                   part_info->no_parts, func_value);
+  return 0;
 }
 
 
-bool get_partition_id_range_sub_hash(partition_info *part_info,
-                                     uint32 *part_id)
+int get_partition_id_range_sub_hash(partition_info *part_info,
+                                     uint32 *part_id,
+                                     longlong *func_value)
 {
   uint32 loc_part_id, sub_part_id;
   uint no_subparts;
+  longlong local_func_value;
+  int error;
   DBUG_ENTER("get_partition_id_range_sub_hash");
-  if (unlikely(get_partition_id_range(part_info, &loc_part_id)))
+
+  if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
+                                              func_value))))
   {
-    DBUG_RETURN(TRUE);
+    DBUG_RETURN(error);
   }
   no_subparts= part_info->no_subparts;
-  sub_part_id= get_part_id_hash(no_subparts, part_info->subpart_expr);
+  sub_part_id= get_part_id_hash(no_subparts, part_info->subpart_expr,
+                                &local_func_value);
   *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
-  DBUG_RETURN(FALSE);
+  DBUG_RETURN(0);
 }
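All of the *_sub_* variants above and below finish by combining the partition
id and the subpartition id through get_part_id_for_sub(), whose body lies
outside this hunk. A sketch of the presumed flattening, assuming global
partition ids number each partition's subpartitions contiguously (an assumption
of this illustration):

    #include <cstdint>

    /* Illustrative combination of partition and subpartition ids. */
    static uint32_t part_id_for_sub_sketch(uint32_t loc_part_id,
                                           uint32_t sub_part_id,
                                           unsigned no_subparts)
    {
      return loc_part_id * (uint32_t) no_subparts + sub_part_id;
    }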
 
 
-bool get_partition_id_range_sub_linear_hash(partition_info *part_info,
-                                            uint32 *part_id)
+int get_partition_id_range_sub_linear_hash(partition_info *part_info,
+                                            uint32 *part_id,
+                                            longlong *func_value)
 {
   uint32 loc_part_id, sub_part_id;
   uint no_subparts;
+  longlong local_func_value;
+  int error;
   DBUG_ENTER("get_partition_id_range_sub_linear_hash");
-  if (unlikely(get_partition_id_range(part_info, &loc_part_id)))
+
+  if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
+                                              func_value))))
   {
-    DBUG_RETURN(TRUE);
+    DBUG_RETURN(error);
   }
   no_subparts= part_info->no_subparts;
   sub_part_id= get_part_id_linear_hash(part_info, no_subparts,
-                                       part_info->subpart_expr);
+                                       part_info->subpart_expr,
+                                       &local_func_value);
   *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
-  DBUG_RETURN(FALSE);
+  DBUG_RETURN(0);
 }
 
 
-bool get_partition_id_range_sub_key(partition_info *part_info,
-                                    uint32 *part_id)
+int get_partition_id_range_sub_key(partition_info *part_info,
+                                    uint32 *part_id,
+                                    longlong *func_value)
 {
   uint32 loc_part_id, sub_part_id;
   uint no_subparts;
+  longlong local_func_value;
+  int error;
   DBUG_ENTER("get_partition_id_range_sub_key");
-  if (unlikely(get_partition_id_range(part_info, &loc_part_id)))
+
+  if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
+                                              func_value))))
   {
-    DBUG_RETURN(TRUE);
+    DBUG_RETURN(error);
   }
   no_subparts= part_info->no_subparts;
-  sub_part_id= get_part_id_key(part_info->subpart_field_array, no_subparts);
+  sub_part_id= get_part_id_key(part_info->subpart_field_array,
+                               no_subparts, &local_func_value);
   *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
-  DBUG_RETURN(FALSE);
+  DBUG_RETURN(0);
 }
 
 
-bool get_partition_id_range_sub_linear_key(partition_info *part_info,
-                                           uint32 *part_id)
+int get_partition_id_range_sub_linear_key(partition_info *part_info,
+                                           uint32 *part_id,
+                                           longlong *func_value)
 {
   uint32 loc_part_id, sub_part_id;
   uint no_subparts;
+  longlong local_func_value;
+  int error;
   DBUG_ENTER("get_partition_id_range_sub_linear_key");
-  if (unlikely(get_partition_id_range(part_info, &loc_part_id)))
+
+  if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
+                                              func_value))))
   {
-    DBUG_RETURN(TRUE);
+    DBUG_RETURN(error);
   }
   no_subparts= part_info->no_subparts;
   sub_part_id= get_part_id_linear_key(part_info,
                                       part_info->subpart_field_array,
-                                      no_subparts);
+                                      no_subparts, &local_func_value);
   *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
-  DBUG_RETURN(FALSE);
+  DBUG_RETURN(0);
 }
 
 
-bool get_partition_id_list_sub_hash(partition_info *part_info,
-                                    uint32 *part_id)
+int get_partition_id_list_sub_hash(partition_info *part_info,
+                                    uint32 *part_id,
+                                    longlong *func_value)
 {
   uint32 loc_part_id, sub_part_id;
   uint no_subparts;
+  longlong local_func_value;
+  int error;
   DBUG_ENTER("get_partition_id_list_sub_hash");
-  if (unlikely(get_partition_id_list(part_info, &loc_part_id)))
+
+  if (unlikely((error= get_partition_id_list(part_info, &loc_part_id,
+                                             func_value))))
   {
-    DBUG_RETURN(TRUE);
+    DBUG_RETURN(error);
   }
   no_subparts= part_info->no_subparts;
-  sub_part_id= get_part_id_hash(no_subparts, part_info->subpart_expr);
+  sub_part_id= get_part_id_hash(no_subparts, part_info->subpart_expr,
+                                &local_func_value);
   *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
-  DBUG_RETURN(FALSE);
+  DBUG_RETURN(0);
 }
 
 
-bool get_partition_id_list_sub_linear_hash(partition_info *part_info,
-                                           uint32 *part_id)
+int get_partition_id_list_sub_linear_hash(partition_info *part_info,
+                                           uint32 *part_id,
+                                           longlong *func_value)
 {
   uint32 loc_part_id, sub_part_id;
   uint no_subparts;
+  longlong local_func_value;
+  int error;
   DBUG_ENTER("get_partition_id_list_sub_linear_hash");
-  if (unlikely(get_partition_id_list(part_info, &loc_part_id)))
+
+  if (unlikely((error= get_partition_id_list(part_info, &loc_part_id,
+                                             func_value))))
   {
-    DBUG_RETURN(TRUE);
+    DBUG_RETURN(error);
   }
   no_subparts= part_info->no_subparts;
-  sub_part_id= get_part_id_hash(no_subparts, part_info->subpart_expr);
+  sub_part_id= get_part_id_linear_hash(part_info, no_subparts,
+                                       part_info->subpart_expr,
+                                       &local_func_value);
   *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
-  DBUG_RETURN(FALSE);
+  DBUG_RETURN(0);
 }
 
 
-bool get_partition_id_list_sub_key(partition_info *part_info,
-                                   uint32 *part_id)
+int get_partition_id_list_sub_key(partition_info *part_info,
+                                   uint32 *part_id,
+                                   longlong *func_value)
 {
   uint32 loc_part_id, sub_part_id;
   uint no_subparts;
+  longlong local_func_value;
+  int error;
   DBUG_ENTER("get_partition_id_range_sub_key");
-  if (unlikely(get_partition_id_list(part_info, &loc_part_id)))
+
+  if (unlikely((error= get_partition_id_list(part_info, &loc_part_id,
+                                             func_value))))
   {
-    DBUG_RETURN(TRUE);
+    DBUG_RETURN(error);
   }
   no_subparts= part_info->no_subparts;
-  sub_part_id= get_part_id_key(part_info->subpart_field_array, no_subparts);
+  sub_part_id= get_part_id_key(part_info->subpart_field_array,
+                               no_subparts, &local_func_value);
   *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
-  DBUG_RETURN(FALSE);
+  DBUG_RETURN(0);
 }
 
 
-bool get_partition_id_list_sub_linear_key(partition_info *part_info,
-                                          uint32 *part_id)
+int get_partition_id_list_sub_linear_key(partition_info *part_info,
+                                          uint32 *part_id,
+                                          longlong *func_value)
 {
   uint32 loc_part_id, sub_part_id;
   uint no_subparts;
+  longlong local_func_value;
+  int error;
   DBUG_ENTER("get_partition_id_list_sub_linear_key");
-  if (unlikely(get_partition_id_list(part_info, &loc_part_id)))
+
+  if (unlikely((error= get_partition_id_list(part_info, &loc_part_id,
+                                             func_value))))
   {
-    DBUG_RETURN(TRUE);
+    DBUG_RETURN(error);
   }
   no_subparts= part_info->no_subparts;
   sub_part_id= get_part_id_linear_key(part_info,
                                       part_info->subpart_field_array,
-                                      no_subparts);
+                                      no_subparts, &local_func_value);
   *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
-  DBUG_RETURN(FALSE);
+  DBUG_RETURN(0);
 }
 
 
 /*
   This function is used to calculate the subpartition id
+
   SYNOPSIS
     get_subpartition_id()
     part_info           A reference to the partition_info struct where all the
                         desired information is given
+
   RETURN VALUE
-    part_id
-    The subpartition identity
+    part_id             The subpartition identity
+
   DESCRIPTION
     A routine used in some SELECT's when only partial knowledge of the
     partitions is known.
@@ -2895,38 +3352,45 @@
 
 uint32 get_partition_id_hash_sub(partition_info *part_info)
 {
-  return get_part_id_hash(part_info->no_subparts, part_info->subpart_expr);
+  longlong func_value;
+  return get_part_id_hash(part_info->no_subparts, part_info->subpart_expr,
+                          &func_value);
 }
 
 
 uint32 get_partition_id_linear_hash_sub(partition_info *part_info)
 {
+  longlong func_value;
   return get_part_id_linear_hash(part_info, part_info->no_subparts,
-                                 part_info->subpart_expr);
+                                 part_info->subpart_expr, &func_value);
 }
 
 
 uint32 get_partition_id_key_sub(partition_info *part_info)
 {
+  longlong func_value;
   return get_part_id_key(part_info->subpart_field_array,
-                         part_info->no_subparts);
+                         part_info->no_subparts, &func_value);
 }
 
 
 uint32 get_partition_id_linear_key_sub(partition_info *part_info)
 {
+  longlong func_value;
   return get_part_id_linear_key(part_info,
                                 part_info->subpart_field_array,
-                                part_info->no_subparts);
+                                part_info->no_subparts, &func_value);
 }
 
 
 /*
-  Set an indicator on all partition fields that are set by the key 
+  Set an indicator on all partition fields that are set by the key
+
   SYNOPSIS
     set_PF_fields_in_key()
     key_info                   Information about the index
     key_length                 Length of key
+
   RETURN VALUE
     TRUE                       Found partition field set by key
     FALSE                      No partition field set by key
@@ -2967,9 +3431,11 @@
 /*
   We have found that at least one partition field was set by a key, now
   check if a partition function has all its fields bound or not.
+
   SYNOPSIS
     check_part_func_bound()
     ptr                     Array of fields NULL terminated (partition fields)
+
   RETURN VALUE
     TRUE                    All fields in partition function are set
     FALSE                   Not all fields in partition function are set
@@ -2995,14 +3461,17 @@
 /*
   Get the id of the subpartitioning part by using the key buffer of the
   index scan.
+
   SYNOPSIS
     get_sub_part_id_from_key()
     table         The table object
     buf           A buffer that can be used to evaluate the partition function
     key_info      The index object
     key_spec      A key_range containing key and key length
+
   RETURN VALUES
     part_id       Subpartition id to use
+
   DESCRIPTION
     Use key buffer to set-up record in buf, move field pointers and
     get the partition identity and restore field pointers afterwards.
@@ -3033,36 +3502,43 @@
 /*
   Get the id of the partitioning part by using the key buffer of the
   index scan.
+
   SYNOPSIS
     get_part_id_from_key()
     table         The table object
     buf           A buffer that can be used to evaluate the partition function
     key_info      The index object
     key_spec      A key_range containing key and key length
-    part_id       Partition to use
+    out:part_id   Partition to use
+
   RETURN VALUES
     TRUE          Partition to use not found
     FALSE         Ok, part_id indicates partition to use
+
   DESCRIPTION
     Use key buffer to set-up record in buf, move field pointers and
     get the partition identity and restore field pointers afterwards.
 */
+
 bool get_part_id_from_key(const TABLE *table, byte *buf, KEY *key_info,
                           const key_range *key_spec, uint32 *part_id)
 {
   bool result;
   byte *rec0= table->record[0];
   partition_info *part_info= table->part_info;
+  longlong func_value;
   DBUG_ENTER("get_part_id_from_key");
 
   key_restore(buf, (byte*)key_spec->key, key_info, key_spec->length);
   if (likely(rec0 == buf))
-    result= part_info->get_part_partition_id(part_info, part_id);
+    result= part_info->get_part_partition_id(part_info, part_id,
+                                             &func_value);
   else
   {
     Field **part_field_array= part_info->part_field_array;
     set_field_ptr(part_field_array, buf, rec0);
-    result= part_info->get_part_partition_id(part_info, part_id);
+    result= part_info->get_part_partition_id(part_info, part_id,
+                                             &func_value);
     set_field_ptr(part_field_array, rec0, buf);
   }
   DBUG_RETURN(result);
@@ -3071,16 +3547,19 @@
 /*
   Get the partitioning id of the full PF by using the key buffer of the
   index scan.
+
   SYNOPSIS
     get_full_part_id_from_key()
     table         The table object
     buf           A buffer that is used to evaluate the partition function
     key_info      The index object
     key_spec      A key_range containing key and key length
-    part_spec     A partition id containing start part and end part
+    out:part_spec A partition id containing start part and end part
+
   RETURN VALUES
     part_spec
     No partitions to scan is indicated by end_part > start_part when returning
+
   DESCRIPTION
     Use key buffer to set-up record in buf, move field pointers if needed and
     get the partition identity and restore field pointers afterwards.
@@ -3094,16 +3573,19 @@
   bool result;
   partition_info *part_info= table->part_info;
   byte *rec0= table->record[0];
+  longlong func_value;
   DBUG_ENTER("get_full_part_id_from_key");
 
   key_restore(buf, (byte*)key_spec->key, key_info, key_spec->length);
   if (likely(rec0 == buf))
-    result= part_info->get_partition_id(part_info, &part_spec->start_part);
+    result= part_info->get_partition_id(part_info, &part_spec->start_part,
+                                        &func_value);
   else
   {
     Field **part_field_array= part_info->full_part_field_array;
     set_field_ptr(part_field_array, buf, rec0);
-    result= part_info->get_partition_id(part_info, &part_spec->start_part);
+    result= part_info->get_partition_id(part_info, &part_spec->start_part,
+                                        &func_value);
     set_field_ptr(part_field_array, rec0, buf);
   }
   part_spec->end_part= part_spec->start_part;
@@ -3114,14 +3596,16 @@
     
 /*
   Get the set of partitions to use in query.
+
   SYNOPSIS
     get_partition_set()
     table         The table object
     buf           A buffer that can be used to evaluate the partition function
     index         The index of the key used, if MAX_KEY no index used
     key_spec      A key_range containing key and key length
-    part_spec     Contains start part, end part and indicator if bitmap is
+    out:part_spec Contains start part, end part and indicator if bitmap is
                   used for which partitions to scan
+
   DESCRIPTION
     This function is called to discover which partitions to use in an index
     scan or a full table scan.
@@ -3131,6 +3615,7 @@
     If start_part > end_part at return it means no partition needs to be
     scanned. If start_part == end_part it always means a single partition
     needs to be scanned.
+
   RETURN VALUE
     part_spec
 */
@@ -3138,7 +3623,8 @@
                        const key_range *key_spec, part_id_range *part_spec)
 {
   partition_info *part_info= table->part_info;
-  uint no_parts= get_tot_partitions(part_info), i, part_id;
+  uint no_parts= get_tot_partitions(part_info);
+  uint i, part_id;
   uint sub_part= no_parts;
   uint32 part_part= no_parts;
   KEY *key_info= NULL;
@@ -3180,7 +3666,8 @@
           sub_part= get_sub_part_id_from_key(table, buf, key_info, key_spec);
         else if (part_info->all_fields_in_PPF.is_set(index))
         {
-          if (get_part_id_from_key(table,buf,key_info,key_spec,(uint32*)&part_part))
+          if (get_part_id_from_key(table,buf,key_info,
+                                   key_spec,(uint32*)&part_part))
           {
             /*
               The value of the RANGE or LIST partitioning was outside of
@@ -3215,15 +3702,18 @@
           clear_indicator_in_key_fields(key_info);
           DBUG_VOID_RETURN; 
         }
-        else if (check_part_func_bound(part_info->part_field_array))
-          sub_part= get_sub_part_id_from_key(table, buf, key_info, key_spec);
-        else if (check_part_func_bound(part_info->subpart_field_array))
+        else if (is_sub_partitioned(part_info))
         {
-          if (get_part_id_from_key(table,buf,key_info,key_spec,(uint32*)&part_part))
+          if (check_part_func_bound(part_info->subpart_field_array))
+            sub_part= get_sub_part_id_from_key(table, buf, key_info, key_spec);
+          else if (check_part_func_bound(part_info->part_field_array))
           {
-            part_spec->start_part= no_parts;
-            clear_indicator_in_key_fields(key_info);
-            DBUG_VOID_RETURN;
+            if (get_part_id_from_key(table,buf,key_info,key_spec,&part_part))
+            {
+              part_spec->start_part= no_parts;
+              clear_indicator_in_key_fields(key_info);
+              DBUG_VOID_RETURN;
+            }
           }
         }
       }
@@ -3292,10 +3782,10 @@
    | Forminfo     288 bytes      |
    -------------------------------
    | Screen buffer, to make      |
-   | field names readable        |
    -------------------------------
    | Packed field info           |
-   | 17 + 1 + strlen(field_name) |
    | + 1 end of file character   |
    -------------------------------
    | Partition info              |
@@ -3304,15 +3794,20 @@
 
    Read the partition syntax from the frm file and parse it to get the
    data structures of the partitioning.
+
    SYNOPSIS
      mysql_unpack_partition()
-     file                          File reference of frm file
      thd                           Thread object
+     part_buf                      Partition info from frm file
      part_info_len                 Length of partition syntax
      table                         Table object of partitioned table
+     part_state                    Partition state information
+     part_state_len                Length of partition state information
+     is_create_table_ind           TRUE if called from CREATE TABLE
+     default_db_type               Default engine of the table
+
    RETURN VALUE
      TRUE                          Error
     FALSE                         Success
+
    DESCRIPTION
      Read the partition syntax from the current position in the frm file.
      Initiate a LEX object, save the list of item tree objects to free after
@@ -3325,13 +3820,16 @@
 */
 
 bool mysql_unpack_partition(THD *thd, const uchar *part_buf,
-                            uint part_info_len, TABLE* table,
+                            uint part_info_len,
+                            uchar *part_state, uint part_state_len,
+                            TABLE* table, bool is_create_table_ind,
                             handlerton *default_db_type)
 {
   Item *thd_free_list= thd->free_list;
   bool result= TRUE;
   partition_info *part_info;
-  LEX *old_lex= thd->lex, lex;
+  LEX *old_lex= thd->lex;
+  LEX lex;
   DBUG_ENTER("mysql_unpack_partition");
 
   thd->lex= &lex;
@@ -3354,13 +3852,59 @@
     we then save in the partition info structure.
   */
   thd->free_list= NULL;
-  lex.part_info= (partition_info*)1; //Indicate yyparse from this place
+  lex.part_info= new partition_info();/* Indicates yyparse from this place */
+  if (!lex.part_info)
+  {
+    mem_alloc_error(sizeof(partition_info));
+    goto end;
+  }
+  lex.part_info->part_state= part_state;
+  lex.part_info->part_state_len= part_state_len;
+  DBUG_PRINT("info", ("Parse: %s", part_buf));
   if (yyparse((void*)thd) || thd->is_fatal_error)
   {
     free_items(thd->free_list);
     goto end;
   }
+  /*
+    The parsed syntax residing in the frm file can still contain defaults.
+    The reason is that the frm file is sometimes saved outside of this
+    MySQL Server and used in backup and restore of clusters or partitioned
+    tables. It is not certain that the restore will restore exactly the
+    same default partitioning.
+    
+    The easiest manner of handling this is to simply continue using the
+    part_info we already built up during mysql_create_table if we are
+    in the process of creating a table. If the table already exists we
+    need to discover the number of partitions for the default parts. Since
+    the handler object hasn't been created here yet we need to postpone this
+    to the fix_partition_func method.
+  */
+
+  DBUG_PRINT("info", ("Successful parse"));
   part_info= lex.part_info;
+  DBUG_PRINT("info", ("default engine = %d", ha_legacy_type(part_info->default_engine_type)));
+  if (is_create_table_ind)
+  {
+    if (old_lex->name)
+    {
+      /*
+        This code is executed when we do CREATE TABLE t1 LIKE t2;
+        old_lex->name contains t2 and the table we are opening has
+        name t1.
+      */
+      Table_ident *ti= (Table_ident*)old_lex->name;
+      const char *db_name= ti->db.str ? ti->db.str : thd->db;
+      const char *table_name= ti->table.str;
+      handler *file;
+      if (partition_default_handling(table, part_info))
+      {
+        DBUG_RETURN(TRUE);
+      }
+    }
+    else
+      part_info= old_lex->part_info;
+  }
   table->part_info= part_info;
   table->file->set_part_info(part_info);
   if (part_info->default_engine_type == NULL)
@@ -3383,30 +3927,25 @@
   */
     uint part_func_len= part_info->part_func_len;
     uint subpart_func_len= part_info->subpart_func_len; 
-    uint bitmap_bits= part_info->no_subparts? 
-                       (part_info->no_subparts* part_info->no_parts):
-                        part_info->no_parts;
-    uint bitmap_bytes= bitmap_buffer_size(bitmap_bits);
-    uint32 *bitmap_buf;
-    char *part_func_string, *subpart_func_string= NULL;
-    if (!((part_func_string= thd->alloc(part_func_len))) ||
+    char *part_func_string= NULL;
+    char *subpart_func_string= NULL;
+    if ((part_func_len &&
+        !((part_func_string= thd->alloc(part_func_len)))) ||
         (subpart_func_len &&
-        !((subpart_func_string= thd->alloc(subpart_func_len)))) ||
-        !((bitmap_buf= (uint32*)thd->alloc(bitmap_bytes))))
+        !((subpart_func_string= thd->alloc(subpart_func_len)))))
     {
-      my_error(ER_OUTOFMEMORY, MYF(0), part_func_len);
+      mem_alloc_error(part_func_len);
       free_items(thd->free_list);
       part_info->item_free_list= 0;
       goto end;
     }
-    memcpy(part_func_string, part_info->part_func_string, part_func_len);
+    if (part_func_len)
+      memcpy(part_func_string, part_info->part_func_string, part_func_len);
     if (subpart_func_len)
       memcpy(subpart_func_string, part_info->subpart_func_string,
              subpart_func_len);
     part_info->part_func_string= part_func_string;
     part_info->subpart_func_string= subpart_func_string;
-
-    bitmap_init(&part_info->used_partitions, bitmap_buf, bitmap_bytes*8, FALSE);
   }
 
   result= FALSE;
@@ -3415,16 +3954,1482 @@
   thd->lex= old_lex;
   DBUG_RETURN(result);
 }
+
+
+/*
+  SYNOPSIS
+    fast_alter_partition_error_handler()
+    lpt                           Container for parameters
+
+  RETURN VALUES
+    None
+
+  DESCRIPTION
+    Support routine to clean up after failures of on-line ALTER TABLE
+    for partition management.
+*/
+
+static void fast_alter_partition_error_handler(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+  DBUG_ENTER("fast_alter_partition_error_handler");
+  /* TODO: WL 2826 Error handling */
+  DBUG_VOID_RETURN;
+}
+
+
+/*
+  SYNOPSIS
+    fast_end_partition()
+    thd                           Thread object
+    out:copied                    Number of records copied
+    out:deleted                   Number of records deleted
+    table_list                    Table list with the one table in it
+    empty                         Has nothing been done
+    lpt                           Struct to be used by error handler
+
+  RETURN VALUES
+    FALSE                         Success
+    TRUE                          Failure
+
+  DESCRIPTION
+    Support routine to handle the successful cases for partition
+    management.
+*/
+
+static int fast_end_partition(THD *thd, ulonglong copied,
+                              ulonglong deleted,
+                              TABLE_LIST *table_list, bool is_empty,
+                              ALTER_PARTITION_PARAM_TYPE *lpt,
+                              bool written_bin_log)
+{
+  int error;
+  DBUG_ENTER("fast_end_partition");
+
+  thd->proc_info="end";
+  if (!is_empty)
+    query_cache_invalidate3(thd, table_list, 0);
+  error= ha_commit_stmt(thd);
+  if (ha_commit(thd))
+    error= 1;
+  if (!error || is_empty)
+  {
+    char tmp_name[80];
+    if ((!is_empty) && (!written_bin_log) &&
+        (!thd->lex->no_write_to_binlog))
+      write_bin_log(thd, FALSE, thd->query, thd->query_length);
+    close_thread_tables(thd);
+    my_snprintf(tmp_name, sizeof(tmp_name), ER(ER_INSERT_INFO),
+                (ulong) (copied + deleted),
+                (ulong) deleted,
+                (ulong) 0);
+    send_ok(thd,copied+deleted,0L,tmp_name);
+    DBUG_RETURN(FALSE);
+  }
+  fast_alter_partition_error_handler(lpt);
+  DBUG_RETURN(TRUE);
+}
+
+
+/*
+  Check that the engine mix is correct
+
+  SYNOPSIS
+    check_engine_condition()
+    p_elem                   Partition element
+    default_engine           Has the user specified an engine on table level
+    inout:engine_type        Current engine used
+    inout:first              Is it the first partition
+
+  RETURN VALUE
+    TRUE                     Failed check
+    FALSE                    Ok
+
+  DESCRIPTION
+    (specified partition handlers)  specified table handler
+    (NDB, NDB) NDB           OK
+    (MYISAM, MYISAM) -       OK
+    (MYISAM, -)      -       NOT OK
+    (MYISAM, -)    MYISAM    OK
+    (- , MYISAM)   -         NOT OK
+    (- , -)        MYISAM    OK
+    (-,-)          -         OK
+    (NDB, MYISAM) *          NOT OK
+*/
+
+static bool check_engine_condition(partition_element *p_elem,
+                                   bool default_engine,
+                                   handlerton **engine_type,
+                                   bool *first)
+{
+  if (*first && default_engine)
+    *engine_type= p_elem->engine_type;
+  *first= FALSE;
+  if ((!default_engine &&
+      (p_elem->engine_type != *engine_type &&
+       !p_elem->engine_type)) ||
+      (default_engine &&
+       p_elem->engine_type != *engine_type))
+    return TRUE;
+  else
+    return FALSE;
+}
+
+/*
+  We need to check if engine used by all partitions can handle
+  partitioning natively.
+
+  SYNOPSIS
+    check_native_partitioned()
+    create_info            Create info in CREATE TABLE
+    out:ret_val            Return value
+    part_info              Partition info
+    thd                    Thread object
+
+  RETURN VALUES
+  Value returned in bool ret_val
+    TRUE                   Native partitioning supported by engine
+    FALSE                  Need to use partition handler
+
+  Return value from function
+    TRUE                   Error
+    FALSE                  Success
+*/
+
+static bool check_native_partitioned(HA_CREATE_INFO *create_info,bool *ret_val,
+                                     partition_info *part_info, THD *thd)
+{
+  List_iterator<partition_element> part_it(part_info->partitions);
+  bool first= TRUE;
+  bool default_engine;
+  handlerton *engine_type= create_info->db_type;
+  handlerton *old_engine_type= engine_type;
+  uint i= 0;
+  handler *file;
+  uint no_parts= part_info->partitions.elements;
+  DBUG_ENTER("check_native_partitioned");
+
+  default_engine= (create_info->used_fields & HA_CREATE_USED_ENGINE) ?
+                   TRUE : FALSE;
+  DBUG_PRINT("info", ("engine_type = %u, default = %u",
+                       ha_legacy_type(engine_type),
+                       default_engine));
+  if (no_parts)
+  {
+    do
+    {
+      partition_element *part_elem= part_it++;
+      if (is_sub_partitioned(part_info) &&
+          part_elem->subpartitions.elements)
+      {
+        uint no_subparts= part_elem->subpartitions.elements;
+        uint j= 0;
+        List_iterator<partition_element> sub_it(part_elem->subpartitions);
+        do
+        {
+          partition_element *sub_elem= sub_it++;
+          if (check_engine_condition(sub_elem, default_engine,
+                                     &engine_type, &first))
+            goto error;
+        } while (++j < no_subparts);
+        /*
+          In case of subpartitioning and defaults we allow that only
+          subparts have specified engines; as long as the parts haven't
+          specified the wrong engine it is ok.
+        */
+        if (check_engine_condition(part_elem, FALSE,
+                                   &engine_type, &first))
+          goto error;
+      }
+      else if (check_engine_condition(part_elem, default_engine,
+                                      &engine_type, &first))
+        goto error;
+    } while (++i < no_parts);
+  }
+
+  /*
+    All engines are of the same type. Check if this engine supports
+    native partitioning.
+  */
+
+  if (!engine_type)
+    engine_type= old_engine_type;
+  DBUG_PRINT("info", ("engine_type = %s",
+              ha_resolve_storage_engine_name(engine_type)));
+  if (engine_type->partition_flags &&
+      (engine_type->partition_flags() & HA_CAN_PARTITION))
+  {
+    create_info->db_type= engine_type;
+    DBUG_PRINT("info", ("Changed to native partitioning"));
+    *ret_val= TRUE;
+  }
+  DBUG_RETURN(FALSE);
+error:
+  /*
+    Mixed engines not yet supported but when supported it will need
+    the partition handler
+  */
+  *ret_val= FALSE;
+  DBUG_RETURN(TRUE);
+}
+
+
+/*
+  Prepare for ALTER TABLE of partition structure
+
+  SYNOPSIS
+    prep_alter_part_table()
+    thd                        Thread object
+    table                      Table object
+    inout:alter_info           Alter information
+    inout:create_info          Create info for CREATE TABLE
+    old_db_type                Old engine type
+    out:partition_changed      Boolean indicating whether partition changed
+    out:fast_alter_partition   Boolean indicating whether fast partition
+                               change is requested
+
+  RETURN VALUES
+    TRUE                       Error
+    FALSE                      Success
+    partition_changed
+    fast_alter_partition
+
+  DESCRIPTION
+    This method handles all preparations for ALTER TABLE for partitioned
+    tables
+    We need to handle both partition management command such as Add Partition
+    and others here as well as an ALTER TABLE that completely changes the
+    partitioning and yet others that don't change anything at all. We start
+    by checking the partition management variants and then check the general
+    change patterns.
+*/
+
+uint prep_alter_part_table(THD *thd, TABLE *table, ALTER_INFO *alter_info,
+                           HA_CREATE_INFO *create_info,
+                           handlerton *old_db_type,
+                           bool *partition_changed,
+                           uint *fast_alter_partition)
+{
+  DBUG_ENTER("prep_alter_part_table");
+
+  if (alter_info->flags &
+      (ALTER_ADD_PARTITION | ALTER_DROP_PARTITION |
+       ALTER_COALESCE_PARTITION | ALTER_REORGANIZE_PARTITION |
+       ALTER_TABLE_REORG | ALTER_OPTIMIZE_PARTITION |
+       ALTER_CHECK_PARTITION | ALTER_ANALYZE_PARTITION |
+       ALTER_REPAIR_PARTITION | ALTER_REBUILD_PARTITION))
+  {
+    partition_info *tab_part_info= table->part_info;
+    if (!tab_part_info)
+    {
+      my_error(ER_PARTITION_MGMT_ON_NONPARTITIONED, MYF(0));
+      DBUG_RETURN(TRUE);
+    }
+    /*
+      We are going to manipulate the partition info on the table object
+      so we need to ensure that the data structure of the table object
+      is freed by setting version to 0. table->s->version= 0 forces a
+      flush of the table object in close_thread_tables().
+    */
+    uint flags;
+    table->s->version= 0L;
+    if (alter_info->flags == ALTER_TABLE_REORG)
+    {
+      uint new_part_no, curr_part_no;
+      ulonglong max_rows= table->s->max_rows;
+      if (tab_part_info->part_type != HASH_PARTITION ||
+          tab_part_info->use_default_no_partitions)
+      {
+        my_error(ER_REORG_NO_PARAM_ERROR, MYF(0));
+        DBUG_RETURN(TRUE);
+      }
+      new_part_no= table->file->get_default_no_partitions(max_rows);
+      curr_part_no= tab_part_info->no_parts;
+      if (new_part_no == curr_part_no)
+      {
+        /*
+          No change is needed, we will have the same number of partitions
+          after the change as before. Thus we can reply ok immediately
+          without any changes at all.
+        */
+        DBUG_RETURN(fast_end_partition(thd, ULL(0), ULL(0), NULL,
+                                       TRUE, NULL, FALSE));
+      }
+      else if (new_part_no > curr_part_no)
+      {
+        /*
+          We will add more partitions, we use the ADD PARTITION without
+          setting the flag for no default number of partitions
+        */
+        alter_info->flags|= ALTER_ADD_PARTITION;
+        thd->lex->part_info->no_parts= new_part_no - curr_part_no;
+      }
+      else
+      {
+        /*
+          We will remove hash partitions, we use the COALESCE PARTITION
+          without setting the flag for no default number of partitions
+        */
+        alter_info->flags|= ALTER_COALESCE_PARTITION;
+        alter_info->no_parts= curr_part_no - new_part_no;
+      }
+    }
+    if (table->s->db_type->alter_table_flags &&
+        (!(flags= table->s->db_type->alter_table_flags(alter_info->flags))))
+    {
+      my_error(ER_PARTITION_FUNCTION_FAILURE, MYF(0));
+      DBUG_RETURN(1);
+    }
+    *fast_alter_partition= flags ^ HA_PARTITION_FUNCTION_SUPPORTED;
+    if (alter_info->flags & ALTER_ADD_PARTITION)
+    {
+      /*
+        We start by moving the new partitions to the list of temporary
+        partitions. We will then check that the new partitions fit in the
+        partitioning scheme as currently set-up.
+        Partitions are always added at the end in ADD PARTITION.
+      */
+      partition_info *alt_part_info= thd->lex->part_info;
+      uint no_new_partitions= alt_part_info->no_parts;
+      uint no_orig_partitions= tab_part_info->no_parts;
+      uint check_total_partitions= no_new_partitions + no_orig_partitions;
+      uint new_total_partitions= check_total_partitions;
+      /*
+        We allow quite a lot of values to be supplied by defaults; however, we
+        must know the number of new partitions in this case.
+      */
+      if (thd->lex->no_write_to_binlog &&
+          tab_part_info->part_type != HASH_PARTITION)
+      {
+        my_error(ER_NO_BINLOG_ERROR, MYF(0));
+        DBUG_RETURN(TRUE);
+      } 
+      if (no_new_partitions == 0)
+      {
+        my_error(ER_ADD_PARTITION_NO_NEW_PARTITION, MYF(0));
+        DBUG_RETURN(TRUE);
+      }
+      if (is_sub_partitioned(tab_part_info))
+      {
+        if (alt_part_info->no_subparts == 0)
+          alt_part_info->no_subparts= tab_part_info->no_subparts;
+        else if (alt_part_info->no_subparts != tab_part_info->no_subparts)
+        {
+          my_error(ER_ADD_PARTITION_SUBPART_ERROR, MYF(0));
+          DBUG_RETURN(TRUE);
+        }
+        check_total_partitions= new_total_partitions*
+                                alt_part_info->no_subparts;
+      }
+      if (check_total_partitions > MAX_PARTITIONS)
+      {
+        my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
+        DBUG_RETURN(TRUE);
+      }
+      alt_part_info->part_type= tab_part_info->part_type;
+      if (set_up_defaults_for_partitioning(alt_part_info,
+                                           table->file,
+                                           ULL(0),
+                                           tab_part_info->no_parts))
+      {
+        DBUG_RETURN(TRUE);
+      }
+/*
+Handling of on-line cases:
+
+ADD PARTITION for RANGE/LIST PARTITIONING:
+------------------------------------------
+For range and list partitions, adding a partition simply adds a
+new empty partition to the table. If the handler supports this we
+will use the simple method of doing this. The figure below shows
+an example of this and the states involved in making this change.
+            
+Existing partitions                                     New added partitions
+------       ------        ------        ------      |  ------    ------
+|    |       |    |        |    |        |    |      |  |    |    |    |
+| p0 |       | p1 |        | p2 |        | p3 |      |  | p4 |    | p5 |
+------       ------        ------        ------      |  ------    ------
+PART_NORMAL  PART_NORMAL   PART_NORMAL   PART_NORMAL    PART_TO_BE_ADDED*2
+PART_NORMAL  PART_NORMAL   PART_NORMAL   PART_NORMAL    PART_IS_ADDED*2
+
+The first line shows the states before adding the new partitions and the
+second line after the new partitions are added. All the partitions are
+in the partitions list; no partitions are placed in the temp_partitions
+list.
+
+ADD PARTITION for HASH PARTITIONING
+-----------------------------------
+This little figure tries to show the various partitions involved when
+adding two new partitions to a linear hash based partitioned table with
+four partitions to start with, which lists are used and the states they
+pass through. Adding partitions to a normal hash based table is similar,
+except that it is always all the existing partitions that are reorganised,
+not only a subset of them.
+
+Existing partitions                                     New added partitions
+------       ------        ------        ------      |  ------    ------
+|    |       |    |        |    |        |    |      |  |    |    |    |
+| p0 |       | p1 |        | p2 |        | p3 |      |  | p4 |    | p5 |
+------       ------        ------        ------      |  ------    ------
+PART_CHANGED PART_CHANGED  PART_NORMAL   PART_NORMAL    PART_TO_BE_ADDED
+PART_IS_CHANGED*2          PART_NORMAL   PART_NORMAL    PART_IS_ADDED
+PART_NORMAL  PART_NORMAL   PART_NORMAL   PART_NORMAL    PART_IS_ADDED
+
+Reorganised existing partitions
+------      ------
+|    |      |    |
+| p0'|      | p1'|
+------      ------
+
+p0 - p5 will be in the partitions list of partitions.
+p0' and p1' will actually not exist as separate objects; their presence can
+be deduced from the state of the partition, and the names of those
+partitions can also be deduced this way.
+
+After adding the partitions and copying the partition data to p0', p1',
+p4 and p5 from p0 and p1, the states change to adapt to the new situation
+where p0 and p1 are dropped and replaced by p0' and p1' and the new p4 and
+p5 are in the table again.
+
+The first line above shows the states of the partitions before we start
+adding and copying partitions, the second after completing the adding
+and copying and finally the third line after also dropping the partitions
+that are reorganised.
+*/
+      if (*fast_alter_partition &&
+          tab_part_info->part_type == HASH_PARTITION)
+      {
+        uint part_no= 0, start_part= 1, start_sec_part= 1;
+        uint end_part= 0, end_sec_part= 0;
+        uint upper_2n= tab_part_info->linear_hash_mask + 1;
+        uint lower_2n= upper_2n >> 1;
+        bool all_parts= TRUE;
+        if (tab_part_info->linear_hash_ind &&
+            no_new_partitions < upper_2n)
+        {
+          /*
+            An analysis of which parts need reorganisation shows that it is
+            divided into two intervals. The first interval is those parts
+            that are reorganised up until upper_2n - 1. From upper_2n and
+            onwards it starts again from partition 0 and goes on until
+            it reaches p(upper_2n - 1). If the last new partition reaches
+            beyond upper_2n - 1 then the first interval will end with
+            p(lower_2n - 1) and start with p(no_orig_partitions - lower_2n).
+            If lower_2n partitions are added then p0 to p(lower_2n - 1) will
+            be reorganised, which means that the two intervals become one
+            interval at this point. Thus only when adding fewer than
+            lower_2n partitions and going beyond a total of upper_2n do we
+            actually get two intervals.
+
+            To exemplify this assume we have 6 partitions to start with and
+            add 1, 2, 3, 5, 6, 7, 8, 9 partitions.
+            The first to add after p5 is p6 = 110 in bit numbers. Thus we
+            can see that 10 = p2 will be the partition to reorganise if only
+            one partition is added.
+            If 2 partitions are added we reorganise [p2, p3]. Those two
+            cases are covered by the second if part below.
+            If 3 partitions are added we reorganise [p2, p3] U [p0,p0]. This
+            part is covered by the else part below.
+            If 5 partitions are added we get [p2,p3] U [p0, p2] = [p0, p3].
+            This is covered by the first if part where we need the max check
+            to here use lower_2n - 1.
+            If 7 partitions are added we get [p2,p3] U [p0, p4] = [p0, p4].
+            This is covered by the first if part but here we use the first
+            calculated end_part.
+            Finally with 9 new partitions we would also reorganise p6 if we
+            used the method below but we cannot reorganise more partitions
+            than what we had from the start and thus we simply set all_parts
+            to TRUE. In this case we don't get into this if-part at all.
+          */
+          all_parts= FALSE;
+          if (no_new_partitions >= lower_2n)
+          {
+            /*
+              In this case there is only one interval since the two intervals
+              overlap and this starts from zero to last_part_no - upper_2n
+            */
+            start_part= 0;
+            end_part= new_total_partitions - (upper_2n + 1);
+            end_part= max(lower_2n - 1, end_part);
+          }
+          else if (new_total_partitions <= upper_2n)
+          {
+            /*
+              Also in this case there is only one interval since we are not
+              going over a 2**n boundary
+            */
+            start_part= no_orig_partitions - lower_2n;
+            end_part= start_part + (no_new_partitions - 1);
+          }
+          else
+          {
+            /* We have two non-overlapping intervals since we are not
+               passing a 2**n border and we have not at least lower_2n
+               new parts that would ensure that the intervals become
+               overlapping.
+            */
+            start_part= no_orig_partitions - lower_2n;
+            end_part= upper_2n - 1;
+            start_sec_part= 0;
+            end_sec_part= new_total_partitions - (upper_2n + 1);
+          }
+        }
+        List_iterator<partition_element> tab_it(tab_part_info->partitions);
+        part_no= 0;
+        do
+        {
+          partition_element *p_elem= tab_it++;
+          if (all_parts ||
+              (part_no >= start_part && part_no <= end_part) ||
+              (part_no >= start_sec_part && part_no <= end_sec_part))
+          {
+            p_elem->part_state= PART_CHANGED;
+          }
+        } while (++part_no < no_orig_partitions);
+      }
+      /*
+        Need to concatenate the lists here to make it possible to check the
+        partition info for correctness using check_partition_info.
+        For on-line add partition we set the state of this partition to
+        PART_TO_BE_ADDED to ensure that it is known that it is not yet
+        usable (it becomes usable when the partition is created and the
+        switch of partition configuration is made).
+      */
+      {
+        List_iterator<partition_element> alt_it(alt_part_info->partitions);
+        uint part_count= 0;
+        do
+        {
+          partition_element *part_elem= alt_it++;
+          if (*fast_alter_partition)
+            part_elem->part_state= PART_TO_BE_ADDED;
+          if (tab_part_info->partitions.push_back(part_elem))
+          {
+            mem_alloc_error(1);
+            DBUG_RETURN(TRUE);
+          }
+        } while (++part_count < no_new_partitions);
+        tab_part_info->no_parts+= no_new_partitions;
+      }
+      /*
+        If we specify partitions explicitly we don't use defaults anymore.
+        Using ADD PARTITION also means that we don't have the default number
+        of partitions anymore. We use this code also for Table reorganisations
+        and here we don't set any default flags to FALSE.
+      */
+      if (!(alter_info->flags & ALTER_TABLE_REORG))
+      {
+        if (!alt_part_info->use_default_partitions)
+        {
+          DBUG_PRINT("info", ("part_info= %x", tab_part_info));
+          tab_part_info->use_default_partitions= FALSE;
+        }
+        tab_part_info->use_default_no_partitions= FALSE;
+      }
+    }
+    else if (alter_info->flags == ALTER_DROP_PARTITION)
+    {
+      /*
+        Dropping a partition from a range or list partitioned table is
+        always safe and can be made more or less immediate. It is necessary
+        however to ensure that the partition to be removed is safely removed
+        and that REPAIR TABLE can remove the partition if for some reason the
+        command to drop the partition failed in the middle.
+      */
+      uint part_count= 0;
+      uint no_parts_dropped= alter_info->partition_names.elements;
+      uint no_parts_found= 0;
+      List_iterator<partition_element> part_it(tab_part_info->partitions);
+      if (!(tab_part_info->part_type == RANGE_PARTITION ||
+            tab_part_info->part_type == LIST_PARTITION))
+      {
+        my_error(ER_ONLY_ON_RANGE_LIST_PARTITION, MYF(0), "DROP");
+        DBUG_RETURN(TRUE);
+      }
+      if (no_parts_dropped >= tab_part_info->no_parts)
+      {
+        my_error(ER_DROP_LAST_PARTITION, MYF(0));
+        DBUG_RETURN(TRUE);
+      }
+      do
+      {
+        partition_element *part_elem= part_it++;
+        if (is_name_in_list(part_elem->partition_name,
+                            alter_info->partition_names))
+        {
+          /*
+            Set state to indicate that the partition is to be dropped.
+          */
+          no_parts_found++;
+          part_elem->part_state= PART_TO_BE_DROPPED;
+        }
+      } while (++part_count < tab_part_info->no_parts);
+      if (no_parts_found != no_parts_dropped)
+      {
+        my_error(ER_DROP_PARTITION_NON_EXISTENT, MYF(0), "DROP");
+        DBUG_RETURN(TRUE);
+      }
+      if (table->file->is_fk_defined_on_table_or_index(MAX_KEY))
+      {
+        my_error(ER_ROW_IS_REFERENCED, MYF(0));
+        DBUG_RETURN(TRUE);
+      }
+    }
+    else if ((alter_info->flags & ALTER_OPTIMIZE_PARTITION) ||
+             (alter_info->flags & ALTER_ANALYZE_PARTITION) ||
+             (alter_info->flags & ALTER_CHECK_PARTITION) ||
+             (alter_info->flags & ALTER_REPAIR_PARTITION) ||
+             (alter_info->flags & ALTER_REBUILD_PARTITION))
+    {
+      uint no_parts_opt= alter_info->partition_names.elements;
+      uint part_count= 0;
+      uint no_parts_found= 0;
+      List_iterator<partition_element> part_it(tab_part_info->partitions);
+
+      do
+      {
+        partition_element *part_elem= part_it++;
+        if ((alter_info->flags & ALTER_ALL_PARTITION) ||
+            (is_name_in_list(part_elem->partition_name,
+                             alter_info->partition_names)))
+        {
+          /*
+            Mark the partition as a partition to be "changed" by
+            analyzing/optimizing/rebuilding/checking/repairing
+          */
+          no_parts_found++;
+          part_elem->part_state= PART_CHANGED;
+        }
+      } while (++part_count < tab_part_info->no_parts);
+      if (no_parts_found != no_parts_opt &&
+          (!(alter_info->flags & ALTER_ALL_PARTITION)))
+      {
+        const char *ptr;
+        if (alter_info->flags & ALTER_OPTIMIZE_PARTITION)
+          ptr= "OPTIMIZE";
+        else if (alter_info->flags & ALTER_ANALYZE_PARTITION)
+          ptr= "ANALYZE";
+        else if (alter_info->flags & ALTER_CHECK_PARTITION)
+          ptr= "CHECK";
+        else if (alter_info->flags & ALTER_REPAIR_PARTITION)
+          ptr= "REPAIR";
+        else
+          ptr= "REBUILD";
+        my_error(ER_DROP_PARTITION_NON_EXISTENT, MYF(0), ptr);
+        DBUG_RETURN(TRUE);
+      }
+    }
+    else if (alter_info->flags & ALTER_COALESCE_PARTITION)
+    {
+      uint no_parts_coalesced= alter_info->no_parts;
+      uint no_parts_remain= tab_part_info->no_parts - no_parts_coalesced;
+      List_iterator<partition_element> part_it(tab_part_info->partitions);
+      if (tab_part_info->part_type != HASH_PARTITION)
+      {
+        my_error(ER_COALESCE_ONLY_ON_HASH_PARTITION, MYF(0));
+        DBUG_RETURN(TRUE);
+      }
+      if (no_parts_coalesced == 0)
+      {
+        my_error(ER_COALESCE_PARTITION_NO_PARTITION, MYF(0));
+        DBUG_RETURN(TRUE);
+      }
+      if (no_parts_coalesced >= tab_part_info->no_parts)
+      {
+        my_error(ER_DROP_LAST_PARTITION, MYF(0));
+        DBUG_RETURN(TRUE);
+      }
+/*
+Online handling:
+COALESCE PARTITION:
+-------------------
+The figure below shows the manner in which partitions are handled when
+performing an on-line coalesce partition and which states they go through
+at start, after adding and copying partitions and finally after dropping
+the partitions to drop. The figure shows an example using four partitions
+to start with, using linear hash and coalescing one partition (always the
+last partition).
+
+When using linear hash, one or more of the remaining partitions will be
+reorganised to receive the rows of the coalesced partition (shown as p1'
+below).
+
+Existing partitions                     Coalesced partition 
+------       ------              ------   |      ------
+|    |       |    |              |    |   |      |    |
+| p0 |       | p1 |              | p2 |   |      | p3 |
+------       ------              ------   |      ------
+PART_NORMAL  PART_CHANGED        PART_NORMAL     PART_REORGED_DROPPED
+PART_NORMAL  PART_IS_CHANGED     PART_NORMAL     PART_TO_BE_DROPPED
+PART_NORMAL  PART_NORMAL         PART_NORMAL     PART_IS_DROPPED
+
+Reorganised existing partitions
+            ------
+            |    |
+            | p1'|
+            ------
+
+p0 - p3 are in the partitions list.
+The p1' partition will not actually be in any list; it is deduced from the
+state of p1.
+*/
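
As an illustration of the arithmetic in the block below, here is a minimal
standalone sketch (not part of this changeset) of how one can compute which
remaining partitions must be reorganised when coalescing under LINEAR HASH.
The helper name is hypothetical and the example in main() uses the figure's
four-partition case; 'mask' is assumed to be the linear hash mask
(upper_2n - 1), as in the code below.

  #include <cstdio>

  /*
    Compute which remaining partitions receive rows from the coalesced
    partition(s) under LINEAR HASH.  Returns true when every remaining
    partition is affected; otherwise the affected partitions are those
    whose index lies in [start,end] or [start2,end2] (an interval with
    start > end is empty).
  */
  static bool linear_hash_changed_range(unsigned no_parts, unsigned coalesced,
                                        unsigned mask,
                                        unsigned *start, unsigned *end,
                                        unsigned *start2, unsigned *end2)
  {
    unsigned remain= no_parts - coalesced;
    unsigned upper_2n= mask + 1;
    unsigned lower_2n= upper_2n >> 1;
    *start= 1; *end= 0; *start2= 1; *end2= 0;       /* empty intervals */
    if (coalesced >= lower_2n)
      return true;                                  /* all parts change */
    if (remain >= lower_2n)
    {
      *end= no_parts - (lower_2n + 1);
      *start= remain - lower_2n;
    }
    else
    {
      *start= 0;
      *end= no_parts - (lower_2n + 1);
      *end2= (lower_2n >> 1) - 1;
      *start2= *end2 - (lower_2n - (remain + 1));
    }
    return false;
  }

  int main()
  {
    /* The figure's example: 4 partitions, linear hash mask 3, coalesce 1. */
    unsigned s, e, s2, e2;
    if (linear_hash_changed_range(4, 1, 3, &s, &e, &s2, &e2))
      printf("all remaining partitions are reorganised\n");
    else
      printf("reorganised partition indexes: [%u,%u] and [%u,%u]\n",
             s, e, s2, e2);             /* prints: [1,1] and [1,0] */
    return 0;
  }
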
+      {
+        uint part_count= 0, start_part= 1, start_sec_part= 1;
+        uint end_part= 0, end_sec_part= 0;
+        bool all_parts= TRUE;
+        if (*fast_alter_partition &&
+            tab_part_info->linear_hash_ind)
+        {
+          uint upper_2n= tab_part_info->linear_hash_mask + 1;
+          uint lower_2n= upper_2n >> 1;
+          all_parts= FALSE;
+          if (no_parts_coalesced >= lower_2n)
+          {
+            all_parts= TRUE;
+          }
+          else if (no_parts_remain >= lower_2n)
+          {
+            end_part= tab_part_info->no_parts - (lower_2n + 1);
+            start_part= no_parts_remain - lower_2n;
+          }
+          else
+          {
+            start_part= 0;
+            end_part= tab_part_info->no_parts - (lower_2n + 1);
+            end_sec_part= (lower_2n >> 1) - 1;
+            start_sec_part= end_sec_part - (lower_2n - (no_parts_remain + 1));
+          }
+        }
+        do
+        {
+          partition_element *p_elem= part_it++;
+          if (*fast_alter_partition &&
+              (all_parts ||
+              (part_count >= start_part && part_count <= end_part) ||
+              (part_count >= start_sec_part && part_count <= end_sec_part)))
+            p_elem->part_state= PART_CHANGED;
+          if (++part_count > no_parts_remain)
+          {
+            if (*fast_alter_partition)
+              p_elem->part_state= PART_REORGED_DROPPED;
+            else
+              part_it.remove();
+          }
+        } while (part_count < tab_part_info->no_parts);
+        tab_part_info->no_parts= no_parts_remain;
+      }
+      if (!(alter_info->flags & ALTER_TABLE_REORG))
+        tab_part_info->use_default_no_partitions= FALSE;
+    }
+    else if (alter_info->flags == ALTER_REORGANIZE_PARTITION)
+    {
+      /*
+        REORGANIZE PARTITION takes a number of partitions that are next to
+        each other (at least for RANGE partitioning) and uses them to
+        create a set of new partitions, copying the data from the old
+        partitions into the new set. The new partitions may have more or
+        fewer values in their LIST value specifications; both are allowed.
+        The ranges can be different, but since a set of consecutive
+        partitions is changed, the new partitions must cover the same
+        total range as the ones they replace.
+        This command can be used on RANGE and LIST partitions.
+      */
+      uint no_parts_reorged= alter_info->partition_names.elements;
+      uint no_parts_new= thd->lex->part_info->partitions.elements;
+      partition_info *alt_part_info= thd->lex->part_info;
+      uint check_total_partitions;
+      if (no_parts_reorged > tab_part_info->no_parts)
+      {
+        my_error(ER_REORG_PARTITION_NOT_EXIST, MYF(0));
+        DBUG_RETURN(TRUE);
+      }
+      if (!(tab_part_info->part_type == RANGE_PARTITION ||
+            tab_part_info->part_type == LIST_PARTITION) &&
+           (no_parts_new != no_parts_reorged))
+      {
+        my_error(ER_REORG_HASH_ONLY_ON_SAME_NO, MYF(0));
+        DBUG_RETURN(TRUE);
+      }
+      check_total_partitions= tab_part_info->no_parts + no_parts_new;
+      check_total_partitions-= no_parts_reorged;
+      if (check_total_partitions > MAX_PARTITIONS)
+      {
+        my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
+        DBUG_RETURN(TRUE);
+      }
+/*
+Online handling:
+REORGANIZE PARTITION:
+---------------------
+The figure shows how partitions are handled, which state changes they go
+through and how they are organised. It uses four partitions as an example,
+two of which (p1 and p2) are reorganised into two new partitions (p4 and
+p5). The reason for such a change could be to change range limits, change
+list values or, for hash partitions, simply to reorganise the partitions,
+which could also involve moving them to new disks or new node groups
+(MySQL Cluster).
+
+Existing partitions                                  
+------       ------        ------        ------
+|    |       |    |        |    |        |    |
+| p0 |       | p1 |        | p2 |        | p3 |
+------       ------        ------        ------
+PART_NORMAL  PART_TO_BE_REORGED          PART_NORMAL
+PART_NORMAL  PART_TO_BE_DROPPED          PART_NORMAL
+PART_NORMAL  PART_IS_DROPPED             PART_NORMAL
+
+Reorganised new partitions (replacing p1 and p2)
+------      ------
+|    |      |    |
+| p4 |      | p5 |
+------      ------
+PART_TO_BE_ADDED
+PART_IS_ADDED
+PART_IS_ADDED
+
+All unchanged partitions and the new partitions are in the partitions list
+in the order they will have when the change is completed. The reorganised
+partitions are placed in the temp_partitions list. PART_IS_ADDED is only a
+temporary state not written in the frm file. It is used to ensure we write
+the generated partition syntax in a correct manner.
+*/
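
The block below splices the replacement partitions into the table's
partition list: the first reorganised partition found is replaced by the
whole new set, the remaining reorganised partitions are removed from the
list (they survive in temp_partitions), and a non-consecutive set of
reorganised partitions is rejected. A minimal standalone sketch (C++11) of
that list surgery using std::list; the function name and the example data
are hypothetical and not part of this changeset.

  #include <cstdio>
  #include <iterator>
  #include <list>
  #include <string>
  #include <vector>

  /*
    Replace the consecutive run of names in 'reorged' inside 'parts' with
    the names in 'added'.  Returns false if the reorganised partitions are
    not consecutive in 'parts'.
  */
  static bool splice_reorganised(std::list<std::string> &parts,
                                 const std::vector<std::string> &reorged,
                                 const std::vector<std::string> &added)
  {
    bool found_first= false, found_last= false;
    for (std::list<std::string>::iterator it= parts.begin();
         it != parts.end(); )
    {
      bool is_reorged= false;
      for (size_t i= 0; i < reorged.size(); i++)
        if (*it == reorged[i])
          is_reorged= true;
      if (!is_reorged)
      {
        if (found_first)
          found_last= true;
        ++it;
        continue;
      }
      if (found_last)
        return false;                   /* not consecutive: refuse */
      if (!found_first)
      {
        found_first= true;
        it= parts.erase(it);            /* first reorged partition ... */
        it= parts.insert(it, added.begin(), added.end());
        std::advance(it, added.size()); /* ... replaced by the new set */
      }
      else
        it= parts.erase(it);            /* later reorged ones: removed */
    }
    return true;
  }

  int main()
  {
    /* The figure's example: p1 and p2 become p4 and p5. */
    std::list<std::string> parts= { "p0", "p1", "p2", "p3" };
    if (splice_reorganised(parts, { "p1", "p2" }, { "p4", "p5" }))
      for (const std::string &p : parts)
        printf("%s ", p.c_str());       /* prints: p0 p4 p5 p3 */
    printf("\n");
    return 0;
  }
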
+      {
+        List_iterator<partition_element> tab_it(tab_part_info->partitions);
+        uint part_count= 0;
+        bool found_first= FALSE;
+        bool found_last= FALSE;
+        bool is_last_partition_reorged;
+        uint drop_count= 0;
+        longlong tab_max_range= 0, alt_max_range= 0;
+        do
+        {
+          partition_element *part_elem= tab_it++;
+          is_last_partition_reorged= FALSE;
+          if (is_name_in_list(part_elem->partition_name,
+                              alter_info->partition_names))
+          {
+            is_last_partition_reorged= TRUE;
+            drop_count++;
+            tab_max_range= part_elem->range_value;
+            if (*fast_alter_partition &&
+                tab_part_info->temp_partitions.push_back(part_elem))
+            {
+              mem_alloc_error(1);
+              DBUG_RETURN(TRUE);
+            }
+            if (*fast_alter_partition)
+              part_elem->part_state= PART_TO_BE_REORGED;
+            if (!found_first)
+            {
+              uint alt_part_count= 0;
+              found_first= TRUE;
+              List_iterator<partition_element>
+                                 alt_it(alt_part_info->partitions);
+              do
+              {
+                partition_element *alt_part_elem= alt_it++;
+                alt_max_range= alt_part_elem->range_value;
+                if (*fast_alter_partition)
+                  alt_part_elem->part_state= PART_TO_BE_ADDED;
+                if (alt_part_count == 0)
+                  tab_it.replace(alt_part_elem);
+                else
+                  tab_it.after(alt_part_elem);
+              } while (++alt_part_count < no_parts_new);
+            }
+            else if (found_last)
+            {
+              my_error(ER_CONSECUTIVE_REORG_PARTITIONS, MYF(0));
+              DBUG_RETURN(TRUE);
+            }
+            else
+              tab_it.remove();
+          }
+          else
+          {
+            if (found_first)
+              found_last= TRUE;
+          }
+        } while (++part_count < tab_part_info->no_parts);
+        if (drop_count != no_parts_reorged)
+        {
+          my_error(ER_DROP_PARTITION_NON_EXISTENT, MYF(0), "REORGANIZE");
+          DBUG_RETURN(TRUE);
+        }
+        if (tab_part_info->part_type == RANGE_PARTITION &&
+            ((is_last_partition_reorged &&
+               alt_max_range < tab_max_range) ||
+              (!is_last_partition_reorged &&
+               alt_max_range != tab_max_range)))
+        {
+          /*
+            For range partitioning the total range covered before and
+            after the change must be the same, with one exception: when
+            the last partition is reorganised it is acceptable to increase
+            the total range.
+            The reason is that "holes" in the middle of the ranges are not
+            allowed, so we must not allow REORGANIZE to create such holes.
+            Nor should REORGANIZE be usable to drop data. (A standalone
+            sketch of this check follows after this function.)
+          */
+          my_error(ER_REORG_OUTSIDE_RANGE, MYF(0));
+          DBUG_RETURN(TRUE);
+        }
+        tab_part_info->no_parts= check_total_partitions;
+      }
+    }
+    else
+    {
+      DBUG_ASSERT(FALSE);
+    }
+    *partition_changed= TRUE;
+    create_info->db_type= &partition_hton;
+    thd->lex->part_info= tab_part_info;
+    if (alter_info->flags == ALTER_ADD_PARTITION ||
+        alter_info->flags == ALTER_REORGANIZE_PARTITION)
+    {
+      if (check_partition_info(tab_part_info, (handlerton**)NULL,
+                               table->file, ULL(0)))
+      {
+        DBUG_RETURN(TRUE);
+      }
+    }
+  }
+  else
+  {
+    /*
+     When thd->lex->part_info has a reference to a partition_info object,
+     the ALTER TABLE statement contained a partitioning definition.
+
+     Case I:
+       There was a partitioning before and a new one is defined.
+       We use the new partitioning. The new partitioning is already
+       defined in the correct variable, so no work is needed to
+       accomplish this.
+       We do however need to set partition_changed so that the ALTER
+       TABLE command changes more than just the frm file.
+
+     Case IIa:
+       There was a partitioning before and there is no new one defined.
+       Also the user has not specified an explicit engine to use.
+
+       We use the old partitioning also for the new table. We do this
+       by assigning the partition_info from the table loaded in
+       open_ltable to the partition_info struct used by mysql_create_table
+       later in this method.
+
+     Case IIb:
+       There was a partitioning before and there is no new one defined.
+       The user has specified an explicit engine to use.
+
+       Since the user has specified an explicit engine to use, we override
+       the old partitioning info and create a new table using the specified
+       engine. This is the reason for the extra check whether the old and
+       new engines are equal.
+       In this case the partitioning is also changed.
+
+     Case III:
+       There was no partitioning before altering the table, but the
+       altered table has a partitioning defined. Use the new partitioning.
+       No work is needed since the partitioning info is already in the
+       correct variable.
+
+       One sub-case is detected here: the new partitioning may use the
+       same partition function as the default (PARTITION BY KEY or
+       PARTITION BY LINEAR KEY with the list of fields equal to the
+       primary key fields, OR PARTITION BY [LINEAR] KEY() for tables
+       without a primary key).
+       Here, too, the partitioning has changed and thus a new table must
+       be created.
+
+     Case IV:
+       There was no partitioning before and no partitioning defined.
+       Obviously no work needed.
+    */
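
A compact sketch of how the four cases above map onto decisions; the
function and parameter names are hypothetical, and the sketch follows the
case descriptions in the comment rather than any particular server
function.

  #include <cstdio>

  struct alter_partitioning_decision
  {
    bool keep_old_partitioning;   /* reuse the table's existing part_info */
    bool partition_changed;       /* more than the frm file must change   */
  };

  static alter_partitioning_decision
  decide(bool had_partitioning,       /* table was partitioned before      */
         bool new_partitioning_given, /* ALTER TABLE defines partitioning  */
         bool engine_changed)         /* an explicit, different engine     */
  {
    alter_partitioning_decision d= { false, false };
    if (had_partitioning && new_partitioning_given)
      d.partition_changed= true;                      /* Case I   */
    else if (had_partitioning && !engine_changed)
      d.keep_old_partitioning= true;                  /* Case IIa */
    else if (had_partitioning)
      d.partition_changed= true;                      /* Case IIb */
    else if (new_partitioning_given)
      d.partition_changed= true;                      /* Case III */
    /* Case IV: no partitioning before or after; nothing to do. */
    return d;
  }

  int main()
  {
    alter_partitioning_decision d= decide(true, false, false);  /* IIa */
    printf("keep old partitioning: %d, partition changed: %d\n",
           d.keep_old_partitioning, d.partition_changed);
    return 0;
  }
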
+    if (table->part_info)
+    {
+      if (!thd->lex->part_info &&
+          create_info->db_type == old_db_type)
+        thd->lex->part_info= table->part_info;
+    }
+    if (thd->lex->part_info)
+    {
+      /*
+        Need to cater for engine types that can handle partitioning
+        without using the partition handler.
+      */
+      if (thd->lex->part_info != table->part_info)
+        *partition_changed= TRUE;
+      if (create_info->db_type == &partition_hton)
+      {
+        if (table->part_info)
+        {
+          thd->lex->part_info->default_engine_type=
+                               table->part_info->default_engine_type;
+        }
+        else
+        {
+          thd->lex->part_info->default_engine_type= 
+                           ha_checktype(thd, DB_TYPE_DEFAULT, FALSE, FALSE);
+        }
+      }
+      else
+      {
+        bool is_native_partitioned= FALSE;
+        partition_info *part_info= thd->lex->part_info;
+        part_info->default_engine_type= create_info->db_type;
+        if (check_native_partitioned(create_info, &is_native_partitioned,
+                                     part_info, thd))
+        {
+          DBUG_RETURN(TRUE);
+        }
+        if (!is_native_partitioned)
+        {
+          DBUG_ASSERT(create_info->db_type != &default_hton);
+          create_info->db_type= &partition_hton;
+        }
+      }
+      DBUG_PRINT("info", ("default_db_type = %s",
+                 thd->lex->part_info->default_engine_type->name));
+    }
+  }
+  DBUG_RETURN(FALSE);
+}
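
The range-preservation rule for REORGANIZE PARTITION that is checked in the
function above reduces to a small predicate. A standalone sketch with
hypothetical example values (the function name is illustrative, not part of
this changeset):

  #include <cstdio>

  /*
    For RANGE partitioning, REORGANIZE PARTITION must keep the covered
    range intact, except that reorganising the last partition may extend
    it.  Returns true when the change must be rejected
    (ER_REORG_OUTSIDE_RANGE).
  */
  static bool reorg_outside_range(long long tab_max_range,
                                  long long alt_max_range,
                                  bool last_partition_reorged)
  {
    return last_partition_reorged ? alt_max_range <  tab_max_range
                                  : alt_max_range != tab_max_range;
  }

  int main()
  {
    /* Hypothetical: the old partitions end at VALUES LESS THAN (100),
       the replacement set ends at 150. */
    printf("last partition reorganised,   100 -> 150: %s\n",
           reorg_outside_range(100, 150, true)  ? "error" : "ok");
    printf("middle partition reorganised, 100 -> 150: %s\n",
           reorg_outside_range(100, 150, false) ? "error" : "ok");
    return 0;
  }
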
+
+
+/*
+  Change partitions, used to implement ALTER TABLE ADD/REORGANIZE/COALESCE
+  PARTITION. This function covers both the single-phase and the multi-phase
+  implementations of ADD/REORGANIZE/COALESCE partitions.
+
+  SYNOPSIS
+    mysql_change_partitions()
+    lpt                        Struct containing parameters
+
+  RETURN VALUES
+    TRUE                          Failure
+    FALSE                         Success
+
+  DESCRIPTION
+    Request the handler to add partitions as set in the partition states
+
+    Elements of the lpt parameters used:
+    create_info                Create information used to create partitions
+    db                         Database name
+    table_name                 Table name
+    copied                     Output parameter to which the number of
+                               copied records is added
+    deleted                    Output parameter to which the number of
+                               deleted records is added
+*/
+
+static bool mysql_change_partitions(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+  char path[FN_REFLEN+1];
+  DBUG_ENTER("mysql_change_partitions");
+
+  build_table_filename(path, sizeof(path), lpt->db, lpt->table_name, "");
+  DBUG_RETURN(lpt->table->file->change_partitions(lpt->create_info, path,
+                                                  &lpt->copied,
+                                                  &lpt->deleted,
+                                                  lpt->pack_frm_data,
+                                                  lpt->pack_frm_len));
+}
+
+
+/*
+  Rename partitions in an ALTER TABLE of partitions
+
+  SYNOPSIS
+    mysql_rename_partitions()
+    lpt                        Struct containing parameters
+
+  RETURN VALUES
+    TRUE                          Failure
+    FALSE                         Success
+
+  DESCRIPTION
+    Request the handler to rename partitions as set in the partition states
+
+    Parameters used:
+    db                         Database name
+    table_name                 Table name
+*/
+
+static bool mysql_rename_partitions(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+  char path[FN_REFLEN+1];
+  DBUG_ENTER("mysql_rename_partitions");
+
+  build_table_filename(path, sizeof(path), lpt->db, lpt->table_name, "");
+  DBUG_RETURN(lpt->table->file->rename_partitions(path));
+}
+
+
+/*
+  Drop partitions in an ALTER TABLE of partitions
+
+  SYNOPSIS
+    mysql_drop_partitions()
+    lpt                        Struct containing parameters
+
+  RETURN VALUES
+    TRUE                          Failure
+    FALSE                         Success
+  DESCRIPTION
+    Drop the partitions marked with PART_TO_BE_DROPPED state and remove
+    those partitions from the list.
+
+    Parameters used:
+    table                       Table object
+    db                          Database name
+    table_name                  Table name
+*/
+
+static bool mysql_drop_partitions(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+  char path[FN_REFLEN+1];
+  partition_info *part_info= lpt->table->part_info;
+  List_iterator<partition_element> part_it(part_info->partitions);
+  uint i= 0;
+  uint remove_count= 0;
+  DBUG_ENTER("mysql_drop_partitions");
+
+  build_table_filename(path, sizeof(path), lpt->db, lpt->table_name, "");
+  if (lpt->table->file->drop_partitions(path))
+  {
+    DBUG_RETURN(TRUE);
+  }
+  do
+  {
+    partition_element *part_elem= part_it++;
+    if (part_elem->part_state == PART_IS_DROPPED)
+    {
+      part_it.remove();
+      remove_count++;
+    }
+  } while (++i < part_info->no_parts);
+  part_info->no_parts-= remove_count;
+  DBUG_RETURN(FALSE);
+}
+
+
+/*
+  Actually perform the change requested by ALTER TABLE of partitions
+  previously prepared.
+
+  SYNOPSIS
+    fast_alter_partition_table()
+    thd                           Thread object
+    table                         Table object
+    alter_info                    ALTER TABLE info
+    create_info                   Create info for CREATE TABLE
+    table_list                    List of the table involved
+    create_list                   The fields in the resulting table
+    key_list                      The keys in the resulting table
+    db                            Database name of new table
+    table_name                    Table name of new table
+
+  RETURN VALUES
+    TRUE                          Error
+    FALSE                         Success
+
+  DESCRIPTION
+    Perform all ALTER TABLE operations for partitioned tables that can be
+    performed fast without a full copy of the original table.
+*/
+
+uint fast_alter_partition_table(THD *thd, TABLE *table,
+                                ALTER_INFO *alter_info,
+                                HA_CREATE_INFO *create_info,
+                                TABLE_LIST *table_list,
+                                List<create_field> *create_list,
+                                List<Key> *key_list, const char *db,
+                                const char *table_name,
+                                uint fast_alter_partition)
+{
+  /* Set-up struct used to write frm files */
+  ulonglong copied= 0;
+  ulonglong deleted= 0;
+  partition_info *part_info= table->part_info;
+  ALTER_PARTITION_PARAM_TYPE lpt_obj;
+  ALTER_PARTITION_PARAM_TYPE *lpt= &lpt_obj;
+  bool written_bin_log= TRUE;
+  DBUG_ENTER("fast_alter_partition_table");
+
+  lpt->thd= thd;
+  lpt->create_info= create_info;
+  lpt->create_list= create_list;
+  lpt->key_list= key_list;
+  lpt->db_options= create_info->table_options;
+  if (create_info->row_type == ROW_TYPE_DYNAMIC)
+    lpt->db_options|= HA_OPTION_PACK_RECORD;
+  lpt->table= table;
+  lpt->key_info_buffer= 0;
+  lpt->key_count= 0;
+  lpt->db= db;
+  lpt->table_name= table_name;
+  lpt->copied= 0;
+  lpt->deleted= 0;
+  lpt->pack_frm_data= NULL;
+  lpt->pack_frm_len= 0;
+  thd->lex->part_info= part_info;
+
+  if (alter_info->flags & ALTER_OPTIMIZE_PARTITION ||
+      alter_info->flags & ALTER_ANALYZE_PARTITION ||
+      alter_info->flags & ALTER_CHECK_PARTITION ||
+      alter_info->flags & ALTER_REPAIR_PARTITION)
+  {
+    /*
+      In this case the user has asked for a set of partitions to be
+      optimised/analysed/checked/repaired and the partition engine can
+      handle this natively without requiring a full rebuild of the
+      partitions.
+
+      It is then enough to call the corresponding handler method; there is
+      no need to change frm files or anything else.
+    */
+    written_bin_log= FALSE;
+    if (((alter_info->flags & ALTER_OPTIMIZE_PARTITION) &&
+         (table->file->optimize_partitions(thd))) ||
+        ((alter_info->flags & ALTER_ANALYZE_PARTITION) &&
+         (table->file->analyze_partitions(thd))) ||
+        ((alter_info->flags & ALTER_CHECK_PARTITION) &&
+         (table->file->check_partitions(thd))) ||
+        ((alter_info->flags & ALTER_REPAIR_PARTITION) &&
+         (table->file->repair_partitions(thd))))
+    {
+      fast_alter_partition_error_handler(lpt);
+      DBUG_RETURN(TRUE);
+    }
+  }
+  else if (fast_alter_partition & HA_PARTITION_ONE_PHASE)
+  {
+    /*
+      In the case where the engine supports one phase online partition
+      changes it is not necessary to have any exclusive locks. The
+      correctness is upheld instead by transactions being aborted if they
+      access the table after its partition definition has changed (if they
+      are still using the old partition definition).
+
+      The handler is in this case responsible for ensuring that all users
+      start using the new frm file after it has changed. To implement
+      one phase it is necessary for the handler to have the master copy
+      of the frm file and to use discovery mechanisms to renew it. Thus
+      write frm will write the frm, pack the new frm and finally delete
+      it; the discovery mechanisms will then either restore the old frm
+      or install the new one once the change is activated.
+
+      Thus all open tables will discover that they are old, at the latest
+      as soon as they try an operation using the old table definition. One
+      should ensure that this is checked already when opening a table,
+      even if it is found in the cache of open tables.
+
+      change_partitions will perform all operations, and it is the duty of
+      the handler to ensure that the frm files in the system get updated
+      in sync with the changes made, and that proper error handling is
+      done if an error occurs.
+
+      If the MySQL Server crashes at this moment but the handler succeeds
+      in performing the change then the binlog is not written for the
+      change. There is no way to solve this as long as the binlog is not
+      transactional and even then it is hard to solve it completely.
+ 
+      The first approach here was to downgrade locks. A different approach
+      has now been decided upon: the handler will have access to the
+      ALTER_INFO when store_lock arrives with TL_WRITE_ALLOW_READ. So if
+      the handler knows that this functionality can be handled with a
+      lower lock level, it will set the lock level to TL_WRITE_ALLOW_WRITE
+      immediately. Thus the need to downgrade the lock disappears.
+      1) Write the new frm, pack it and then delete it
+      2) Perform the change within the handler
+    */
+    if ((mysql_write_frm(lpt, WFRM_INITIAL_WRITE | WFRM_PACK_FRM)) ||
+        (mysql_change_partitions(lpt)))
+    {
+      fast_alter_partition_error_handler(lpt);
+      DBUG_RETURN(TRUE);
+    }
+  }
+  else if (alter_info->flags == ALTER_DROP_PARTITION)
+  {
+    /*
+      Now after all checks and setting state on dropped partitions we can
+      start the actual dropping of the partitions.
+
+      Dropping a partition actually means two things happen. The first is
+      that a lot of records are deleted. The second is that the behaviour
+      of subsequent updates, writes and deletes will change. The delete
+      part can be handled without any particularly high lock level by
+      transactional engines, whereas non-transactional engines need to
+      ensure that this change is done with an exclusive lock on the table.
+      The second part, the change of partitioning, does however require
+      an exclusive lock to install the new partitioning as one atomic
+      operation. If this is not the case, it is possible for two
+      transactions to see the change in a different order than their
+      serialisation order. Thus we need an exclusive lock for both
+      transactional and non-transactional engines.
+
+      For LIST partitions it could be possible to avoid the exclusive lock
+      (and for RANGE partitions if they didn't rearrange range definitions
+      after a DROP PARTITION) if one ensured that failed accesses to the
+      dropped partitions were reliably aborted (thus only possible for
+      transactional engines).
+      
+      1) Lock the table in TL_WRITE_ONLY to ensure all other accesses to
+         the table have completed
+      2) Write the new frm file where the partitions have changed but are
+         still remaining with the state PART_TO_BE_DROPPED
+      3) Write the bin log
+      4) Prepare MyISAM handlers for drop of partitions
+      5) Ensure that any users that have opened the table but not yet
+         reached the abort lock do so before downgrading the lock.
+      6) Drop the partitions
+      7) Write a new frm file recording that the partitions have been
+         dropped
+      8) Wait until all accesses using the old frm file have completed
+      9) Complete query
+    */
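
The statement just below (and the similar ones in the later branches)
chains the protocol steps with || so that the first failing step aborts the
sequence; calls whose return value is not used here, such as write_bin_log()
and close_open_tables_and_downgrade(), are folded in with the comma operator
as (call(), FALSE). A minimal standalone sketch of the idiom with
hypothetical step names:

  #include <cstdio>

  /* Hypothetical steps; each returns true on failure, like the real ones. */
  static bool step_write_frm(void)    { printf("write frm\n");    return false; }
  static bool step_drop_parts(void)   { printf("drop parts\n");   return false; }
  /* A step with no meaningful return value. */
  static void step_write_binlog(void) { printf("write binlog\n"); }

  int main()
  {
    /*
      Evaluation stops at the first step that returns true (failure).
      (step_write_binlog(), false) runs the void step and contributes
      'false', so it can never abort the chain by itself.
    */
    if (step_write_frm() ||
        (step_write_binlog(), false) ||
        step_drop_parts())
    {
      fprintf(stderr, "partition management step failed\n");
      return 1;
    }
    return 0;
  }
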
+    if ((abort_and_upgrade_lock(lpt)) ||
+        (mysql_write_frm(lpt, WFRM_INITIAL_WRITE)) ||
+        ((!thd->lex->no_write_to_binlog) &&
+         (write_bin_log(thd, FALSE,
+                       thd->query, thd->query_length), FALSE)) ||
+        (table->file->extra(HA_EXTRA_PREPARE_FOR_DELETE)) ||
+        (close_open_tables_and_downgrade(lpt), FALSE) || 
+        (mysql_drop_partitions(lpt)) ||
+        (mysql_write_frm(lpt, WFRM_CREATE_HANDLER_FILES)) ||
+        (mysql_wait_completed_table(lpt, table), FALSE))
+    {
+      fast_alter_partition_error_handler(lpt);
+      DBUG_RETURN(TRUE);
+    }
+  }
+  else if ((alter_info->flags & ALTER_ADD_PARTITION) &&
+           (part_info->part_type == RANGE_PARTITION ||
+            part_info->part_type == LIST_PARTITION))
+  {
+    /*
+      ADD RANGE/LIST PARTITIONS
+      In this case no tuples are removed and no tuples are added; the
+      operation merely adds a new partition. It is still necessary to
+      perform the change as an atomic operation; otherwise a reader that
+      does not see the new partition could miss rows inserted into the new
+      partition by a transaction serialised before it.
+
+      1) Write the new frm file where state of added partitions is
+         changed to PART_TO_BE_ADDED
+      2) Add the new partitions
+      3) Lock all partitions in TL_WRITE_ONLY to ensure that no users
+         are still using the old partitioning scheme. Wait until all
+         ongoing users have completed before progressing.
+      4) Write a new frm file of the table where the partitions are added
+         to the table.
+      5) Write binlog
+      6) Wait until all accesses using the old frm file have completed
+      7) Complete query
+    */
+    if ((mysql_write_frm(lpt, WFRM_INITIAL_WRITE)) ||
+        (mysql_change_partitions(lpt)) ||
+        (abort_and_upgrade_lock(lpt)) ||
+        (mysql_write_frm(lpt, WFRM_CREATE_HANDLER_FILES)) ||
+        ((!thd->lex->no_write_to_binlog) &&
+         (write_bin_log(thd, FALSE,
+                        thd->query, thd->query_length), FALSE)) ||
+        (close_open_tables_and_downgrade(lpt), FALSE))
+    {
+      fast_alter_partition_error_handler(lpt);
+      DBUG_RETURN(TRUE);
+    }
+  }
+  else
+  {
+    /*
+      ADD HASH PARTITION/
+      COALESCE PARTITION/
+      REBUILD PARTITION/
+      REORGANIZE PARTITION
+ 
+      In this case all records are still around after the change although
+      possibly organised into new partitions, thus by ensuring that all
+      updates go to both the old and the new partitioning scheme we can
+      actually perform this operation lock-free. The only exception to
+      this is when REORGANIZE PARTITION adds/drops ranges. In this case
+      there needs to be an exclusive lock during the time when the range
+      changes occur.
+      This is only possible if the handler can ensure double-write for a
+      period. The double write will ensure that it doesn't matter where the
+      data is read from since both places are updated for writes. If such
+      double writing is not performed then it is necessary to perform the
+      change with the usual exclusive lock. With double writes it is even
+      possible to perform writes in parallel with the reorganisation of
+      partitions.
+
+      Without a double-write procedure we get the procedure below. The
+      only difference when double write is used is that the lock can be
+      downgraded to TL_WRITE_ALLOW_WRITE. Double write in this case only
+      writes from old to new. If we had double writing in both directions
+      we could perform the change completely without an exclusive lock
+      for HASH partitions.
+      Handlers that perform double writing during the copy phase can
+      actually use a lower lock level. This can be handled inside
+      store_lock in the respective handler.
+
+      1) Write the new frm file where state of added partitions is
+         changed to PART_TO_BE_ADDED and the reorganised partitions
+         are set in state PART_TO_BE_REORGED.
+      2) Add the new partitions
+         Copy from the reorganised partitions to the new partitions
+      3) Lock all partitions in TL_WRITE_ONLY to ensure that no users
+         are still using the old partitioning scheme. Wait until all
+         ongoing users have completed before progressing.
+      4) Prepare MyISAM handlers for rename and delete of partitions
+      5) Write a new frm file of the table where the partitions are
+         reorganised.
+      6) Rename the reorged partitions such that they are no longer
+         used and rename those added to their real new names.
+      7) Write bin log
+      8) Wait until all accesses using the old frm file have completed
+      9) Drop the reorganised partitions
+      10)Write a new frm file of the table where the partitions are
+         reorganised.
+      11)Wait until all accesses using the old frm file have completed
+      12)Complete query
+    */
+
+    if ((mysql_write_frm(lpt, WFRM_INITIAL_WRITE)) ||
+        (mysql_change_partitions(lpt)) ||
+        (abort_and_upgrade_lock(lpt)) ||
+        (mysql_write_frm(lpt, WFRM_CREATE_HANDLER_FILES)) ||
+        (table->file->extra(HA_EXTRA_PREPARE_FOR_DELETE)) ||
+        (mysql_rename_partitions(lpt)) ||
+        ((!thd->lex->no_write_to_binlog) &&
+         (write_bin_log(thd, FALSE,
+                        thd->query, thd->query_length), FALSE)) ||
+        (close_open_tables_and_downgrade(lpt), FALSE) ||
+        (mysql_drop_partitions(lpt)) ||
+        (mysql_write_frm(lpt, 0UL)) ||
+        (mysql_wait_completed_table(lpt, table), FALSE))
+    {
+        fast_alter_partition_error_handler(lpt);
+        DBUG_RETURN(TRUE);
+    }
+  }
+  /*
+    The final step is to write the query to the binlog (unless it was
+    already written above) and send OK to the user.
+  */
+  DBUG_RETURN(fast_end_partition(thd, lpt->copied, lpt->deleted,
+                                 table_list, FALSE, lpt,
+                                 written_bin_log));
+}
 #endif
 
+
 /*
   Prepare for calling val_int on partition function by setting fields to
   point to the record where the values of the PF-fields are stored.
+
   SYNOPSIS
     set_field_ptr()
     ptr                 Array of fields to change ptr
     new_buf             New record pointer
     old_buf             Old record pointer
+
   DESCRIPTION
     Set ptr in field objects of field array to refer to new_buf record
     instead of previously old_buf. Used before calling val_int and after
@@ -3434,10 +5439,10 @@
 */
 
 void set_field_ptr(Field **ptr, const byte *new_buf,
-                            const byte *old_buf)
+                   const byte *old_buf)
 {
   my_ptrdiff_t diff= (new_buf - old_buf);
-  DBUG_ENTER("set_nullable_field_ptr");
+  DBUG_ENTER("set_field_ptr");
 
   do
   {
@@ -3452,11 +5457,13 @@
   point to the record where the values of the PF-fields are stored.
   This variant works on a key_part reference.
   It is not required that all fields are NOT NULL fields.
+
   SYNOPSIS
     set_key_field_ptr()
-    key_part            key part with a set of fields to change ptr
+    key_info            key info with a set of fields to change ptr
     new_buf             New record pointer
     old_buf             Old record pointer
+
   DESCRIPTION
     Set ptr in field objects of field array to refer to new_buf record
     instead of previously old_buf. Used before calling val_int and after
@@ -3469,7 +5476,8 @@
                        const byte *old_buf)
 {
   KEY_PART_INFO *key_part= key_info->key_part;
-  uint key_parts= key_info->key_parts, i= 0;
+  uint key_parts= key_info->key_parts;
+  uint i= 0;
   my_ptrdiff_t diff= (new_buf - old_buf);
   DBUG_ENTER("set_key_field_ptr");
 
@@ -3479,6 +5487,27 @@
     key_part++;
   } while (++i < key_parts);
   DBUG_VOID_RETURN;
+}
+
+
+/*
+  SYNOPSIS
+    mem_alloc_error()
+    size                Size of memory we attempted to allocate
+
+  RETURN VALUES
+    None
+
+  DESCRIPTION
+    A simple routine that signals a memory allocation error. It is meant
+    for the many places in the code where such an error can occur.
+*/
+
+void mem_alloc_error(size_t size)
+{
+  my_error(ER_OUTOFMEMORY, MYF(0), size);
 }
 
 #ifdef WITH_PARTITION_STORAGE_ENGINE