List: Commits — « Previous Message | Next Message »
From: Patrick Galbraith
Date: January 18, 2006 12:53am
Subject: bk commit into 5.1 tree (patg:1.2022)
View as plain text
Below is the list of changes that have just been committed into a local
5.1 repository of patg. When patg does a push these changes will
be propagated to the main repository and, within 24 hours after the
push, to the public repository.
For information on how to access the public repository
see http://dev.mysql.com/doc/mysql/en/installing-source-tree.html

ChangeSet
  1.2022 06/01/17 16:53:36 patg@stripped +13 -0
  Merge pgalbraith@stripped:/home/bk/mysql-5.1-new
  into  govinda.patg.net:/home/patg/mysql-build/mysql-5.1-wl2682
  
  (WL# 2682, merge from 5.1-new to private 5.1 tree)

  sql/share/errmsg.txt
    1.64 06/01/17 16:53:33 patg@stripped +29 -29
    WL# 2682 merge 5.1-new to private tree

  sql/ha_partition.cc
    1.31 06/01/17 16:53:33 patg@stripped +21 -48
    WL# 2682, merge 5.1-new to private tree

  sql/table.h
    1.122 06/01/17 16:39:19 patg@stripped +0 -0
    Auto merged

  sql/sql_yacc.yy
    1.439 06/01/17 16:39:19 patg@stripped +0 -0
    Auto merged

  sql/sql_partition.cc
    1.24 06/01/17 16:39:19 patg@stripped +0 -0
    Auto merged

  sql/sql_parse.cc
    1.507 06/01/17 16:39:19 patg@stripped +0 -0
    Auto merged

  sql/sql_lex.h
    1.212 06/01/17 16:39:19 patg@stripped +0 -0
    Auto merged

  sql/sql_lex.cc
    1.170 06/01/17 16:39:19 patg@stripped +0 -0
    Auto merged

  sql/sql_base.cc
    1.294 06/01/17 16:39:19 patg@stripped +0 -0
    Auto merged

  sql/opt_range.cc
    1.196 06/01/17 16:39:19 patg@stripped +0 -0
    Auto merged

  sql/handler.h
    1.180 06/01/17 16:39:19 patg@stripped +0 -0
    Auto merged

  sql/ha_partition.h
    1.14 06/01/17 16:39:19 patg@stripped +0 -0
    Auto merged

  include/my_sys.h
    1.185 06/01/17 16:39:18 patg@stripped +0 -0
    Auto merged

# This is a BitKeeper patch.  What follows are the unified diffs for the
# set of deltas contained in the patch.  The rest of the patch, the part
# that BitKeeper cares about, is below these diffs.
# User:	patg
# Host:	govinda.patg.net
# Root:	/home/patg/mysql-build/mysql-5.1-wl2682/RESYNC

--- 1.184/include/my_sys.h	2006-01-08 12:40:30 -08:00
+++ 1.185/include/my_sys.h	2006-01-17 16:39:18 -08:00
@@ -753,6 +753,7 @@
 extern void delete_dynamic(DYNAMIC_ARRAY *array);
 extern void delete_dynamic_element(DYNAMIC_ARRAY *array, uint array_index);
 extern void freeze_size(DYNAMIC_ARRAY *array);
+extern int  get_index_dynamic(DYNAMIC_ARRAY *array, gptr element);
 #define dynamic_array_ptr(array,array_index) ((array)->buffer+(array_index)*(array)->size_of_element)
 #define dynamic_element(array,array_index,type) ((type)((array)->buffer) +(array_index))
 #define push_dynamic(A,B) insert_dynamic((A),(B))
@@ -805,6 +806,9 @@
 extern my_bool my_compress(byte *, ulong *, ulong *);
 extern my_bool my_uncompress(byte *, ulong *, ulong *);
 extern byte *my_compress_alloc(const byte *packet, ulong *len, ulong *complen);
+extern int packfrm(const void *, uint, const void **, uint *);
+extern int unpackfrm(const void **, uint *, const void *);
+
 extern ha_checksum my_checksum(ha_checksum crc, const byte *mem, uint count);
 extern uint my_bit_log2(ulong value);
 extern uint my_count_bits(ulonglong v);

--- 1.179/sql/handler.h	2006-01-17 16:36:21 -08:00
+++ 1.180/sql/handler.h	2006-01-17 16:39:19 -08:00
@@ -99,6 +99,7 @@
 #define HA_CAN_PARTITION       (1 << 0) /* Partition support */
 #define HA_CAN_UPDATE_PARTITION_KEY (1 << 1)
 #define HA_CAN_PARTITION_UNIQUE (1 << 2)
+#define HA_USE_AUTO_PARTITION (1 << 3)
 
 
 /* bits in index_flags(index_number) for what you can do with index */
@@ -109,9 +110,58 @@
 #define HA_ONLY_WHOLE_INDEX	16	/* Can't use part key searches */
 #define HA_KEYREAD_ONLY         64	/* Support HA_EXTRA_KEYREAD */
 
-/* bits in alter_table_flags */
-#define HA_ONLINE_ADD_EMPTY_PARTITION 1
-#define HA_ONLINE_DROP_PARTITION 2
+/*
+  bits in alter_table_flags:
+*/
+/*
+  These bits are set if different kinds of indexes can be created
+  off-line without re-create of the table (but with a table lock).
+*/
+#define HA_ONLINE_ADD_INDEX_NO_WRITES           (1L << 0) /*add index w/lock*/
+#define HA_ONLINE_DROP_INDEX_NO_WRITES          (1L << 1) /*drop index w/lock*/
+#define HA_ONLINE_ADD_UNIQUE_INDEX_NO_WRITES    (1L << 2) /*add unique w/lock*/
+#define HA_ONLINE_DROP_UNIQUE_INDEX_NO_WRITES   (1L << 3) /*drop uniq. w/lock*/
+#define HA_ONLINE_ADD_PK_INDEX_NO_WRITES        (1L << 4) /*add prim. w/lock*/
+#define HA_ONLINE_DROP_PK_INDEX_NO_WRITES       (1L << 5) /*drop prim. w/lock*/
+/*
+  These are set if different kinds of indexes can be created on-line
+  (without a table lock). If a handler is capable of one or more of
+  these, it should also set the corresponding *_NO_WRITES bit(s).
+*/
+#define HA_ONLINE_ADD_INDEX                     (1L << 6) /*add index online*/
+#define HA_ONLINE_DROP_INDEX                    (1L << 7) /*drop index online*/
+#define HA_ONLINE_ADD_UNIQUE_INDEX              (1L << 8) /*add unique online*/
+#define HA_ONLINE_DROP_UNIQUE_INDEX             (1L << 9) /*drop uniq. online*/
+#define HA_ONLINE_ADD_PK_INDEX                  (1L << 10)/*add prim. online*/
+#define HA_ONLINE_DROP_PK_INDEX                 (1L << 11)/*drop prim. online*/
+/*
+  HA_PARTITION_FUNCTION_SUPPORTED indicates that the function is
+  supported at all.
+  HA_FAST_CHANGE_PARTITION means that optimised variants of the changes
+  exists but they are not necessarily done online.
+
+  HA_ONLINE_DOUBLE_WRITE means that the handler supports writing to both
+  the new partition and to the old partitions when updating through the
+  old partitioning schema while performing a change of the partitioning.
+  This means that we can support updating of the table while performing
+  the copy phase of the change. For no lock at all also a double write
+  from new to old must exist and this is not required when this flag is
+  set.
+  This is actually removed even before it was introduced the first time.
+  The new idea is that handlers will handle the lock level already in
+  store_lock for ALTER TABLE partitions.
+
+  HA_PARTITION_ONE_PHASE is a flag that can be set by handlers that take
+  care of changing the partitions online and in one phase. Thus all phases
+  needed to handle the change are implemented inside the storage engine.
+  The storage engine must also support auto-discovery since the frm file
+  is changed as part of the change and this change must be controlled by
+  the storage engine. A typical engine to support this is NDB (through
+  WL #2498).
+*/
+#define HA_PARTITION_FUNCTION_SUPPORTED         (1L << 12)
+#define HA_FAST_CHANGE_PARTITION                (1L << 13)
+#define HA_PARTITION_ONE_PHASE                  (1L << 14)
 
 /*
   Index scan will not return records in rowid order. Not guaranteed to be
@@ -119,7 +169,6 @@
 */
 #define HA_KEY_SCAN_NOT_ROR     128 
 
-
 /* operations for disable/enable indexes */
 #define HA_KEY_SWITCH_NONUNIQ      0
 #define HA_KEY_SWITCH_ALL          1
@@ -135,16 +184,6 @@
 #define MAX_HA 15
 
 /*
-  Bits in index_ddl_flags(KEY *wanted_index)
-  for what ddl you can do with index
-  If none is set, the wanted type of index is not supported
-  by the handler at all. See WorkLog 1563.
-*/
-#define HA_DDL_SUPPORT   1 /* Supported by handler */
-#define HA_DDL_WITH_LOCK 2 /* Can create/drop with locked table */
-#define HA_DDL_ONLINE    4 /* Can create/drop without lock */
-
-/*
   Parameters for open() (in register form->filestat)
   HA_GET_INFO does an implicit HA_ABORT_IF_LOCKED
 */
@@ -205,6 +244,24 @@
 		ROW_TYPE_DYNAMIC, ROW_TYPE_COMPRESSED,
 		ROW_TYPE_REDUNDANT, ROW_TYPE_COMPACT };
 
+enum enum_binlog_func {
+  BFN_RESET_LOGS=        1,
+  BFN_RESET_SLAVE=       2,
+  BFN_BINLOG_WAIT=       3,
+  BFN_BINLOG_END=        4,
+  BFN_BINLOG_PURGE_FILE= 5
+};
+
+enum enum_binlog_command {
+  LOGCOM_CREATE_TABLE,
+  LOGCOM_ALTER_TABLE,
+  LOGCOM_RENAME_TABLE,
+  LOGCOM_DROP_TABLE,
+  LOGCOM_CREATE_DB,
+  LOGCOM_ALTER_DB,
+  LOGCOM_DROP_DB
+};
+
 /* struct to hold information about the table that should be created */
 
 /* Bits in used_fields */
@@ -315,6 +372,82 @@
 #define MAX_XID_LIST_SIZE  (1024*128)
 #endif
 
+/*
+  These structures are used to pass information from a set of SQL commands
+  on add/drop/change tablespace definitions to the proper hton.
+*/
+#define UNDEF_NODEGROUP 65535
+enum ts_command_type
+{
+  TS_CMD_NOT_DEFINED = -1,
+  CREATE_TABLESPACE = 0,
+  ALTER_TABLESPACE = 1,
+  CREATE_LOGFILE_GROUP = 2,
+  ALTER_LOGFILE_GROUP = 3,
+  DROP_TABLESPACE = 4,
+  DROP_LOGFILE_GROUP = 5,
+  CHANGE_FILE_TABLESPACE = 6,
+  ALTER_ACCESS_MODE_TABLESPACE = 7
+};
+
+enum ts_alter_tablespace_type
+{
+  TS_ALTER_TABLESPACE_TYPE_NOT_DEFINED = -1,
+  ALTER_TABLESPACE_ADD_FILE = 1,
+  ALTER_TABLESPACE_DROP_FILE = 2
+};
+
+enum tablespace_access_mode
+{
+  TS_NOT_DEFINED= -1,
+  TS_READ_ONLY = 0,
+  TS_READ_WRITE = 1,
+  TS_NOT_ACCESSIBLE = 2
+};
+
+class st_alter_tablespace : public Sql_alloc
+{
+  public:
+  const char *tablespace_name;
+  const char *logfile_group_name;
+  enum ts_command_type ts_cmd_type;
+  enum ts_alter_tablespace_type ts_alter_tablespace_type;
+  const char *data_file_name;
+  const char *undo_file_name;
+  const char *redo_file_name;
+  ulonglong extent_size;
+  ulonglong undo_buffer_size;
+  ulonglong redo_buffer_size;
+  ulonglong initial_size;
+  ulonglong autoextend_size;
+  ulonglong max_size;
+  uint nodegroup_id;
+  enum legacy_db_type storage_engine;
+  bool wait_until_completed;
+  const char *ts_comment;
+  enum tablespace_access_mode ts_access_mode;
+  st_alter_tablespace()
+  {
+    tablespace_name= NULL;
+    logfile_group_name= "DEFAULT_LG"; //Default log file group
+    ts_cmd_type= TS_CMD_NOT_DEFINED;
+    data_file_name= NULL;
+    undo_file_name= NULL;
+    redo_file_name= NULL;
+    extent_size= 1024*1024;        //Default 1 MByte
+    undo_buffer_size= 8*1024*1024; //Default 8 MByte
+    redo_buffer_size= 8*1024*1024; //Default 8 MByte
+    initial_size= 128*1024*1024;   //Default 128 MByte
+    autoextend_size= 0;            //No autoextension as default
+    max_size= 0;                   //Max size == initial size => no extension
+    storage_engine= DB_TYPE_UNKNOWN;
+    nodegroup_id= UNDEF_NODEGROUP;
+    wait_until_completed= TRUE;
+    ts_comment= NULL;
+    ts_access_mode= TS_NOT_DEFINED;
+  }
+};
+
 /* The handler for a table type.  Will be included in the TABLE structure */
 
 struct st_table;
@@ -344,7 +477,8 @@
     handlerton structure version
    */
   const int interface_version;
-#define MYSQL_HANDLERTON_INTERFACE_VERSION 0x0000
+/* last version change: 0x0001 in 5.1.6 */
+#define MYSQL_HANDLERTON_INTERFACE_VERSION 0x0001
 
 
   /*
@@ -434,7 +568,19 @@
    int (*start_consistent_snapshot)(THD *thd);
    bool (*flush_logs)();
    bool (*show_status)(THD *thd, stat_print_fn *print, enum ha_stat_type stat);
+   uint (*partition_flags)();
+   uint (*alter_table_flags)(uint flags);
+   int (*alter_tablespace)(THD *thd, st_alter_tablespace *ts_info);
    uint32 flags;                                /* global handler flags */
+   /* 
+      Handlerton functions are not set in the different storage
+      engines static initialization.  They are initialized at handler init.
+      Thus, leave them last in the struct.
+   */
+   int (*binlog_func)(THD *thd, enum_binlog_func fn, void *arg);
+   void (*binlog_log_query)(THD *thd, enum_binlog_command binlog_command,
+                            const char *query, uint query_length,
+                            const char *db, const char *table_name);
 } handlerton;
 
 extern const handlerton default_hton;
@@ -488,12 +634,27 @@
   PART_NORMAL= 0,
   PART_IS_DROPPED= 1,
   PART_TO_BE_DROPPED= 2,
-  PART_DROPPING= 3,
-  PART_IS_ADDED= 4,
-  PART_ADDING= 5,
-  PART_ADDED= 6
+  PART_TO_BE_ADDED= 3,
+  PART_TO_BE_REORGED= 4,
+  PART_REORGED_DROPPED= 5,
+  PART_CHANGED= 6,
+  PART_IS_CHANGED= 7,
+  PART_IS_ADDED= 8
 };
 
+typedef struct {
+  ulonglong data_file_length;
+  ulonglong max_data_file_length;
+  ulonglong index_file_length;
+  ulonglong delete_length;
+  ha_rows records;
+  ulong mean_rec_length;
+  time_t create_time;
+  time_t check_time;
+  time_t update_time;
+  ulonglong check_sum;
+} PARTITION_INFO;
+
 #define UNDEF_NODEGROUP 65535
 class Item;
 
@@ -531,13 +692,14 @@
 
 typedef struct {
   longlong list_value;
-  uint partition_id;
+  uint32 partition_id;
 } LIST_PART_ENTRY;
 
 class partition_info;
 
-typedef bool (*get_part_id_func)(partition_info *part_info,
-                                 uint32 *part_id);
+typedef int (*get_part_id_func)(partition_info *part_info,
+                                 uint32 *part_id,
+                                 longlong *func_value);
 typedef uint32 (*get_subpart_id_func)(partition_info *part_info);
 
 class partition_info :public Sql_alloc {
@@ -611,6 +773,8 @@
   char *part_func_string;
   char *subpart_func_string;
 
+  uchar *part_state;
+
   partition_element *curr_part_elem;
   partition_element *current_partition;
   /*
@@ -627,12 +791,12 @@
   partition_type subpart_type;
 
   uint part_info_len;
+  uint part_state_len;
   uint part_func_len;
   uint subpart_func_len;
 
   uint no_parts;
   uint no_subparts;
-  uint count_curr_parts;
   uint count_curr_subparts;
 
   uint part_error_code;
@@ -643,14 +807,24 @@
   uint no_subpart_fields;
   uint no_full_part_fields;
 
+  /*
+    This variable is used to calculate the partition id when using
+    LINEAR KEY/HASH. This functionality is kept in the MySQL Server
+    but mainly of use to handlers supporting partitioning.
+  */
   uint16 linear_hash_mask;
 
   bool use_default_partitions;
+  bool use_default_no_partitions;
   bool use_default_subpartitions;
+  bool use_default_no_subpartitions;
+  bool default_partitions_setup;
   bool defined_max_value;
   bool list_of_part_fields;
   bool list_of_subpart_fields;
   bool linear_hash_ind;
+  bool fixed;
+  bool from_openfrm;
 
   partition_info()
   : get_partition_id(NULL), get_part_partition_id(NULL),
@@ -661,19 +835,27 @@
     list_array(NULL),
     part_info_string(NULL),
     part_func_string(NULL), subpart_func_string(NULL),
+    part_state(NULL),
     curr_part_elem(NULL), current_partition(NULL),
     default_engine_type(NULL),
     part_result_type(INT_RESULT),
     part_type(NOT_A_PARTITION), subpart_type(NOT_A_PARTITION),
-    part_info_len(0), part_func_len(0), subpart_func_len(0),
+    part_info_len(0), part_state_len(0),
+    part_func_len(0), subpart_func_len(0),
     no_parts(0), no_subparts(0),
-    count_curr_parts(0), count_curr_subparts(0), part_error_code(0),
+    count_curr_subparts(0), part_error_code(0),
     no_list_values(0), no_part_fields(0), no_subpart_fields(0),
     no_full_part_fields(0), linear_hash_mask(0),
     use_default_partitions(TRUE),
-    use_default_subpartitions(TRUE), defined_max_value(FALSE),
+    use_default_no_partitions(TRUE),
+    use_default_subpartitions(TRUE),
+    use_default_no_subpartitions(TRUE),
+    default_partitions_setup(FALSE),
+    defined_max_value(FALSE),
     list_of_part_fields(FALSE), list_of_subpart_fields(FALSE),
-    linear_hash_ind(FALSE)
+    linear_hash_ind(FALSE),
+    fixed(FALSE),
+    from_openfrm(FALSE)
   {
     all_fields_in_PF.clear_all();
     all_fields_in_PPF.clear_all();
@@ -721,13 +903,15 @@
   return part_info->no_parts *
          (is_sub_partitioned(part_info) ? part_info->no_subparts : 1);
 }
+
+
 #endif
 
 typedef struct st_ha_create_information
 {
   CHARSET_INFO *table_charset, *default_table_charset;
   LEX_STRING connect_string;
-  const char *comment,*password;
+  const char *comment,*password, *tablespace;
   const char *data_file_name, *index_file_name;
   const char *alias;
   ulonglong max_rows,min_rows;
@@ -747,6 +931,7 @@
   bool table_existed;			/* 1 in create if table existed */
   bool frm_only;                        /* 1 if no ha_create_table() */
   bool varchar;                         /* 1 if table has a VARCHAR */
+  bool store_on_disk;                   /* 1 if table stored on disk */
 } HA_CREATE_INFO;
 
 
@@ -769,8 +954,8 @@
 
 #ifdef WITH_PARTITION_STORAGE_ENGINE
 bool is_partition_in_list(char *part_name, List<char> list_part_names);
-bool is_partitions_in_table(partition_info *new_part_info,
-                            partition_info *old_part_info);
+char *are_partitions_in_table(partition_info *new_part_info,
+                              partition_info *old_part_info);
 bool check_reorganise_list(partition_info *new_part_info,
                            partition_info *old_part_info,
                            List<char> list_part_names);
@@ -781,15 +966,17 @@
 handler *get_ha_partition(partition_info *part_info);
 int get_parts_for_update(const byte *old_data, byte *new_data,
                          const byte *rec0, partition_info *part_info,
-                         uint32 *old_part_id, uint32 *new_part_id);
+                         uint32 *old_part_id, uint32 *new_part_id,
+                         longlong *func_value);
 int get_part_for_delete(const byte *buf, const byte *rec0,
                         partition_info *part_info, uint32 *part_id);
-bool check_partition_info(partition_info *part_info,handlerton *eng_type,
+bool check_partition_info(partition_info *part_info,handlerton **eng_type,
                           handler *file, ulonglong max_rows);
-bool fix_partition_func(THD *thd, const char *name, TABLE *table);
+bool fix_partition_func(THD *thd, const char *name, TABLE *table,
+                        bool create_table_ind);
 char *generate_partition_syntax(partition_info *part_info,
                                 uint *buf_length, bool use_sql_alloc,
-                                bool add_default_info);
+                                bool write_all);
 bool partition_key_modified(TABLE *table, List<Item> &fields);
 void get_partition_set(const TABLE *table, byte *buf, const uint index,
                        const key_range *key_spec,
@@ -799,7 +986,9 @@
                                const key_range *key_spec,
                                part_id_range *part_spec);
 bool mysql_unpack_partition(THD *thd, const uchar *part_buf,
-                            uint part_info_len, TABLE *table,
+                            uint part_info_len,
+                            uchar *part_state, uint part_state_len,
+                            TABLE *table, bool is_create_table_ind,
                             handlerton *default_db_type);
 void make_used_partitions_str(partition_info *part_info, String *parts_str);
 uint32 get_list_array_idx_for_endpoint(partition_info *part_info,
@@ -832,7 +1021,6 @@
   byte *end_of_used_area;     /* End of area that was used by handler */
 } HANDLER_BUFFER;
 
-
 class handler :public Sql_alloc
 {
 #ifdef WITH_PARTITION_STORAGE_ENGINE
@@ -1120,6 +1308,12 @@
   virtual int ha_update_row(const byte * old_data, byte * new_data);
   virtual int ha_delete_row(const byte * buf);
   /*
+    If the handler does it's own injection of the rows, this member function
+    should return 'true'.
+  */
+  virtual bool is_injective() const { return false; }
+  
+  /*
     SYNOPSIS
       start_bulk_update()
     RETURN
@@ -1244,6 +1438,8 @@
     { return (ha_rows) 10; }
   virtual void position(const byte *record)=0;
   virtual void info(uint)=0; // see my_base.h for full description
+  virtual void get_dynamic_partition_info(PARTITION_INFO *stat_info,
+                                          uint part_id);
   virtual int extra(enum ha_extra_function operation)
   { return 0; }
   virtual int extra_opt(enum ha_extra_function operation, ulong cache_size)
@@ -1358,18 +1554,25 @@
   virtual const char *table_type() const =0;
   virtual const char **bas_ext() const =0;
   virtual ulong table_flags(void) const =0;
-  virtual ulong alter_table_flags(void) const { return 0; }
 #ifdef WITH_PARTITION_STORAGE_ENGINE
-  virtual ulong partition_flags(void) const { return 0;}
   virtual int get_default_no_partitions(ulonglong max_rows) { return 1;}
-  virtual void set_part_info(partition_info *part_info) { return; }
+  virtual void set_auto_partitions(partition_info *part_info) { return; }
+  virtual bool get_no_parts(const char *name,
+                            uint *no_parts)
+  {
+    *no_parts= 0;
+    return 0;
+  }
+  virtual void set_part_info(partition_info *part_info) {return;}
 #endif
   virtual ulong index_flags(uint idx, uint part, bool all_parts) const =0;
-  virtual ulong index_ddl_flags(KEY *wanted_index) const
-  { return (HA_DDL_SUPPORT); }
+
   virtual int add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys)
   { return (HA_ERR_WRONG_COMMAND); }
-  virtual int drop_index(TABLE *table_arg, uint *key_num, uint num_of_keys)
+  virtual int prepare_drop_index(TABLE *table_arg, uint *key_num,
+                                 uint num_of_keys)
+  { return (HA_ERR_WRONG_COMMAND); }
+  virtual int final_drop_index(TABLE *table_arg)
   { return (HA_ERR_WRONG_COMMAND); }
 
   uint max_record_length() const
@@ -1406,19 +1609,26 @@
   virtual int create(const char *name, TABLE *form, HA_CREATE_INFO *info)=0;
   virtual int create_handler_files(const char *name) { return FALSE;}
 
-  /*
-    SYNOPSIS
-      drop_partitions()
-      path                        Complete path of db and table name
-    RETURN VALUE
-      TRUE                        Failure
-      FALSE                       Success
-    DESCRIPTION
-      Drop a partition, during this operation no other activity is ongoing
-      in this server on the table.
-  */
+  virtual int change_partitions(HA_CREATE_INFO *create_info,
+                                const char *path,
+                                ulonglong *copied,
+                                ulonglong *deleted,
+                                const void *pack_frm_data,
+                                uint pack_frm_len)
+  { return HA_ERR_WRONG_COMMAND; }
   virtual int drop_partitions(const char *path)
   { return HA_ERR_WRONG_COMMAND; }
+  virtual int rename_partitions(const char *path)
+  { return HA_ERR_WRONG_COMMAND; }
+  virtual int optimize_partitions(THD *thd)
+  { return HA_ERR_WRONG_COMMAND; }
+  virtual int analyze_partitions(THD *thd)
+  { return HA_ERR_WRONG_COMMAND; }
+  virtual int check_partitions(THD *thd)
+  { return HA_ERR_WRONG_COMMAND; }
+  virtual int repair_partitions(THD *thd)
+  { return HA_ERR_WRONG_COMMAND; }
+
   /* lock_count() can be more than one if the table is a MERGE */
   virtual uint lock_count(void) const { return 1; }
   virtual THR_LOCK_DATA **store_lock(THD *thd,
@@ -1628,3 +1838,21 @@
 int ha_repl_report_sent_binlog(THD *thd, char *log_file_name,
                                my_off_t end_offset);
 int ha_repl_report_replication_stop(THD *thd);
+
+#ifdef HAVE_NDB_BINLOG
+int ha_reset_logs(THD *thd);
+int ha_binlog_index_purge_file(THD *thd, const char *file);
+void ha_reset_slave(THD *thd);
+void ha_binlog_log_query(THD *thd, enum_binlog_command binlog_command,
+                         const char *query, uint query_length,
+                         const char *db, const char *table_name);
+void ha_binlog_wait(THD *thd);
+int ha_binlog_end(THD *thd);
+#else
+#define ha_reset_logs(a) 0
+#define ha_binlog_index_purge_file(a,b) 0
+#define ha_reset_slave(a)
+#define ha_binlog_log_query(a,b,c,d,e,f);
+#define ha_binlog_wait(a)
+#define ha_binlog_end(a) 0
+#endif

--- 1.195/sql/opt_range.cc	2006-01-16 16:50:32 -08:00
+++ 1.196/sql/opt_range.cc	2006-01-17 16:39:19 -08:00
@@ -2728,8 +2728,10 @@
         DBUG_EXECUTE("info", dbug_print_onepoint_range(ppar->arg_stack,
                                                        ppar->part_fields););
         uint32 part_id;
+        longlong func_value;
         /* then find in which partition the {const1, ...,constN} tuple goes */
-        if (ppar->get_top_partition_id_func(ppar->part_info, &part_id))
+        if (ppar->get_top_partition_id_func(ppar->part_info, &part_id,
+                                            &func_value))
         {
           res= 0; /* No satisfying partitions */
           goto pop_and_go_right;

--- 1.293/sql/sql_base.cc	2006-01-08 12:40:32 -08:00
+++ 1.294/sql/sql_base.cc	2006-01-17 16:39:19 -08:00
@@ -804,13 +804,14 @@
 */
 
 bool close_cached_tables(THD *thd, bool if_wait_for_refresh,
-			 TABLE_LIST *tables)
+			 TABLE_LIST *tables, bool have_lock)
 {
   bool result=0;
   DBUG_ENTER("close_cached_tables");
   DBUG_ASSERT(thd || (!if_wait_for_refresh && !tables));
 
-  VOID(pthread_mutex_lock(&LOCK_open));
+  if (!have_lock)
+    VOID(pthread_mutex_lock(&LOCK_open));
   if (!tables)
   {
     refresh_version++;				// Force close of open tables
@@ -889,7 +890,8 @@
     for (TABLE *table=thd->open_tables; table ; table= table->next)
       table->s->version= refresh_version;
   }
-  VOID(pthread_mutex_unlock(&LOCK_open));
+  if (!have_lock)
+    VOID(pthread_mutex_unlock(&LOCK_open));
   if (if_wait_for_refresh)
   {
     pthread_mutex_lock(&thd->mysys_var->mutex);
@@ -2083,8 +2085,10 @@
     (*field)->table_name= &table->alias;
   }
   for (key=0 ; key < table->s->keys ; key++)
+  {
     for (part=0 ; part < table->key_info[key].usable_key_parts ; part++)
       table->key_info[key].key_part[part].field->table= table;
+  }
   if (table->triggers)
     table->triggers->set_table(table);
 
@@ -2216,7 +2220,7 @@
       {
 	if (abort_locks)
 	{
-	  mysql_lock_abort(thd,table);		// Close waiting threads
+	  mysql_lock_abort(thd,table, TRUE);	// Close waiting threads
 	  mysql_lock_remove(thd, thd->locked_tables,table);
 	  table->locked_by_flush=1;		// Will be reopened with locks
 	}
@@ -2359,7 +2363,7 @@
     if (!strcmp(table->s->table_name.str, table_name) &&
 	!strcmp(table->s->db.str, db))
     {
-      mysql_lock_abort(thd,table);
+      mysql_lock_abort(thd,table, TRUE);
       break;
     }
   }
@@ -2385,7 +2389,7 @@
 
     table->s->table_map_id is not ULONG_MAX.
  */
-static void assign_new_table_id(TABLE *table)
+void assign_new_table_id(TABLE *table)
 {
   static ulong last_table_id= ULONG_MAX;
 
@@ -2471,7 +2475,7 @@
                                                HA_TRY_READ_ONLY),
                                        (READ_KEYINFO | COMPUTE_TYPES |
                                         EXTRA_RECORD),
-                                       thd->open_options, entry)))
+                                       thd->open_options, entry, FALSE)))
   {
     if (error == 7)                             // Table def changed
     {
@@ -2535,7 +2539,7 @@
                                        HA_TRY_READ_ONLY),
                                READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD,
                                ha_open_options | HA_OPEN_FOR_REPAIR,
-                               entry) || ! entry->file ||
+                               entry, FALSE) || ! entry->file ||
  	(entry->file->is_crashed() && entry->file->check_and_repair(thd)))
      {
        /* Give right error message */
@@ -3364,7 +3368,7 @@
                                     HA_GET_INDEX),
                             READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD,
                             ha_open_options,
-                            tmp_table))
+                            tmp_table, FALSE))
   {
     /* No need to lock share->mutex as this is not needed for tmp tables */
     free_table_share(share);
@@ -6068,3 +6072,155 @@
 {
   return a->length == b->length && !strncmp(a->str, b->str, a->length);
 }
+
+
+/*
+  SYNOPSIS
+    abort_and_upgrade_lock()
+    lpt                           Parameter passing struct
+    All parameters passed through the ALTER_PARTITION_PARAM_TYPE object
+  RETURN VALUES
+    TRUE                          Failure
+    FALSE                         Success
+  DESCRIPTION
+    Remember old lock level (for possible downgrade later on), abort all
+    waiting threads and ensure that all keeping locks currently are
+    completed such that we own the lock exclusively and no other interaction
+    is ongoing.
+
+    thd                           Thread object
+    table                         Table object
+    db                            Database name
+    table_name                    Table name
+    old_lock_level                Old lock level
+*/
+
+bool abort_and_upgrade_lock(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+  uint flags= RTFC_WAIT_OTHER_THREAD_FLAG | RTFC_CHECK_KILLED_FLAG;
+  int error= FALSE;
+  DBUG_ENTER("abort_and_upgrade_locks");
+
+  lpt->old_lock_type= lpt->table->reginfo.lock_type;
+  VOID(pthread_mutex_lock(&LOCK_open));
+  mysql_lock_abort(lpt->thd, lpt->table, TRUE);
+  VOID(remove_table_from_cache(lpt->thd, lpt->db, lpt->table_name, flags));
+  if (lpt->thd->killed)
+  {
+    lpt->thd->no_warnings_for_error= 0;
+    error= TRUE;
+  }
+  VOID(pthread_mutex_unlock(&LOCK_open));
+  DBUG_RETURN(error);
+}
+
+
+/*
+  SYNOPSIS
+    close_open_tables_and_downgrade()
+  RESULT VALUES
+    NONE
+  DESCRIPTION
+    We need to ensure that any thread that has managed to open the table
+    but not yet encountered our lock on the table is also thrown out to
+    ensure that no threads see our frm changes premature to the final
+    version. The intermediate versions are only meant for use after a
+    crash and later REPAIR TABLE.
+    We also downgrade locks after the upgrade to WRITE_ONLY
+*/
+
+void close_open_tables_and_downgrade(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+  VOID(pthread_mutex_lock(&LOCK_open));
+  remove_table_from_cache(lpt->thd, lpt->db, lpt->table_name,
+                          RTFC_WAIT_OTHER_THREAD_FLAG);
+  VOID(pthread_mutex_unlock(&LOCK_open));
+  mysql_lock_downgrade_write(lpt->thd, lpt->table, lpt->old_lock_type);
+}
+
+
+/*
+  SYNOPSIS
+    mysql_wait_completed_table()
+    lpt                            Parameter passing struct
+    my_table                       My table object
+    All parameters passed through the ALTER_PARTITION_PARAM object
+  RETURN VALUES
+    TRUE                          Failure
+    FALSE                         Success
+  DESCRIPTION
+    We have changed the frm file and now we want to wait for all users of
+    the old frm to complete before proceeding to ensure that no one
+    remains that uses the old frm definition.
+    Start by ensuring that all users of the table will be removed from cache
+    once they are done. Then abort all that have stumbled on locks and
+    haven't been started yet.
+
+    thd                           Thread object
+    table                         Table object
+    db                            Database name
+    table_name                    Table name
+*/
+
+void mysql_wait_completed_table(ALTER_PARTITION_PARAM_TYPE *lpt, TABLE *my_table)
+{
+  char key[MAX_DBKEY_LENGTH];
+  uint key_length;
+  TABLE *table;
+  DBUG_ENTER("mysql_wait_completed_table");
+
+  key_length=(uint) (strmov(strmov(key,lpt->db)+1,lpt->table_name)-key)+1;
+  VOID(pthread_mutex_lock(&LOCK_open));
+  HASH_SEARCH_STATE state;
+  for (table= (TABLE*) hash_first(&open_cache,(byte*) key,key_length,
+                                  &state) ;
+       table;
+       table= (TABLE*) hash_next(&open_cache,(byte*) key,key_length,
+                                 &state))
+  {
+    THD *in_use= table->in_use;
+    table->s->version= 0L;
+    if (!in_use)
+    {
+      relink_unused(table);
+    }
+    else
+    {
+      /* Kill delayed insert threads */
+      if ((in_use->system_thread & SYSTEM_THREAD_DELAYED_INSERT) &&
+          ! in_use->killed)
+      {
+        in_use->killed= THD::KILL_CONNECTION;
+        pthread_mutex_lock(&in_use->mysys_var->mutex);
+        if (in_use->mysys_var->current_cond)
+        {
+          pthread_mutex_lock(in_use->mysys_var->current_mutex);
+          pthread_cond_broadcast(in_use->mysys_var->current_cond);
+          pthread_mutex_unlock(in_use->mysys_var->current_mutex);
+        }
+        pthread_mutex_unlock(&in_use->mysys_var->mutex);
+      }
+      /*
+        Now we must abort all tables locks used by this thread
+        as the thread may be waiting to get a lock for another table
+      */
+      for (TABLE *thd_table= in_use->open_tables;
+           thd_table ;
+           thd_table= thd_table->next)
+      {
+        if (thd_table->db_stat)		// If table is open
+          mysql_lock_abort_for_thread(lpt->thd, thd_table);
+      }
+    }
+  }
+  /*
+    We start by removing all unused objects from the cache and marking
+    those in use for removal after completion. Now we also need to abort
+    all that are locked and are not progressing due to being locked
+    by our lock. We don't upgrade our lock here.
+  */
+  mysql_lock_abort(lpt->thd, my_table, FALSE);
+  VOID(pthread_mutex_unlock(&LOCK_open));
+  DBUG_VOID_RETURN;
+}
+

--- 1.169/sql/sql_lex.cc	2006-01-10 10:50:18 -08:00
+++ 1.170/sql/sql_lex.cc	2006-01-17 16:39:19 -08:00
@@ -1391,7 +1391,8 @@
 						  thr_lock_type flags,
 						  List<String> *use_index,
 						  List<String> *ignore_index,
-                                                  LEX_STRING *option)
+              LEX_STRING *option,
+              LEX_STRING *partition_name)
 {
   return 0;
 }

--- 1.211/sql/sql_lex.h	2006-01-16 23:37:27 -08:00
+++ 1.212/sql/sql_lex.h	2006-01-17 16:39:19 -08:00
@@ -372,7 +372,8 @@
 					thr_lock_type flags= TL_UNLOCK,
 					List<String> *use_index= 0,
 					List<String> *ignore_index= 0,
-                                        LEX_STRING *option= 0);
+          LEX_STRING *option= 0,
+          LEX_STRING *partition_name= 0);
   virtual void set_lock_for_tables(thr_lock_type lock_type) {}
 
   friend class st_select_lex_unit;
@@ -613,7 +614,8 @@
 				thr_lock_type flags= TL_UNLOCK,
 				List<String> *use_index= 0,
 				List<String> *ignore_index= 0,
-                                LEX_STRING *option= 0);
+        LEX_STRING *option= 0,
+        LEX_STRING *partition_name= 0);
   TABLE_LIST* get_table_list();
   bool init_nested_join(THD *thd);
   TABLE_LIST *end_nested_join(THD *thd);

--- 1.506/sql/sql_parse.cc	2006-01-08 12:40:32 -08:00
+++ 1.507/sql/sql_parse.cc	2006-01-17 16:39:19 -08:00
@@ -25,6 +25,7 @@
 #include "sp_head.h"
 #include "sp.h"
 #include "sp_cache.h"
+#include "event.h"
 
 #ifdef HAVE_OPENSSL
 /*
@@ -66,7 +67,7 @@
 static bool check_db_used(THD *thd,TABLE_LIST *tables);
 static bool check_multi_update_lock(THD *thd);
 static void remove_escape(char *name);
-static void refresh_status(void);
+static void refresh_status(THD *thd);
 static bool append_file_to_dir(THD *thd, const char **filename_ptr,
 			       const char *table_name);
 
@@ -78,7 +79,7 @@
   "Connect","Kill","Debug","Ping","Time","Delayed insert","Change user",
   "Binlog Dump","Table Dump",  "Connect Out", "Register Slave",
   "Prepare", "Execute", "Long Data", "Close stmt",
-  "Reset stmt", "Set option", "Fetch",
+  "Reset stmt", "Set option", "Fetch", "Daemon",
   "Error"					// Last command number
 };
 
@@ -148,7 +149,7 @@
   DBUG_RETURN(error);
 }
 
-static bool begin_trans(THD *thd)
+bool begin_trans(THD *thd)
 {
   int error=0;
   if (unlikely(thd->in_sub_stmt))
@@ -208,7 +209,7 @@
 {
   int return_val= 0;
   uint temp_len, user_len;
-  char temp_user[USERNAME_LENGTH+HOSTNAME_LENGTH+2];
+  char temp_user[USER_HOST_BUFF_SIZE];
   struct  user_conn *uc;
 
   DBUG_ASSERT(user != 0);
@@ -642,6 +643,9 @@
   uc_update_queries[SQLCOM_DROP_INDEX]=1;
   uc_update_queries[SQLCOM_CREATE_VIEW]=1;
   uc_update_queries[SQLCOM_DROP_VIEW]=1;
+  uc_update_queries[SQLCOM_CREATE_EVENT]=1;
+  uc_update_queries[SQLCOM_ALTER_EVENT]=1;
+  uc_update_queries[SQLCOM_DROP_EVENT]=1;  
 }
 
 bool is_update_query(enum enum_sql_command command)
@@ -737,7 +741,7 @@
   {
     USER_CONN *uc;
     uint temp_len=lu->user.length+lu->host.length+2;
-    char temp_user[USERNAME_LENGTH+HOSTNAME_LENGTH+2];
+    char temp_user[USER_HOST_BUFF_SIZE];
 
     memcpy(temp_user,lu->user.str,lu->user.length);
     memcpy(temp_user+lu->user.length+1,lu->host.str,lu->host.length);
@@ -3682,6 +3686,61 @@
     res=mysqld_show_create_db(thd,lex->name,&lex->create_info);
     break;
   }
+  case SQLCOM_CREATE_EVENT:
+  case SQLCOM_ALTER_EVENT:
+  case SQLCOM_DROP_EVENT:
+  {
+    uint rows_affected= 1;
+    DBUG_ASSERT(lex->et);
+    do {
+      if (! lex->et->dbname.str)
+      {
+        my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0));
+        res= true;
+        break;
+      }
+
+      if (check_access(thd, EVENT_ACL, lex->et->dbname.str, 0, 0, 0,
+                       is_schema_db(lex->et->dbname.str)))
+        break;
+
+      switch (lex->sql_command) {
+      case SQLCOM_CREATE_EVENT:
+        res= evex_create_event(thd, lex->et, (uint) lex->create_info.options,
+                               &rows_affected);
+        break;
+      case SQLCOM_ALTER_EVENT:
+        res= evex_update_event(thd, lex->et, lex->spname, &rows_affected);
+        break;
+      case SQLCOM_DROP_EVENT:
+        res= evex_drop_event(thd, lex->et, lex->drop_if_exists, &rows_affected);
+      default:;
+      }
+      if (!res)
+        send_ok(thd, rows_affected);
+
+      /* lex->unit.cleanup() is called outside, no need to call it here */
+    } while (0);  
+    lex->et->free_sphead_on_delete= true;
+    delete lex->et;
+    lex->et= 0;
+    break;
+  }
+  case SQLCOM_SHOW_CREATE_EVENT:
+  {
+    if (check_access(thd, EVENT_ACL, lex->spname->m_db.str, 0, 0, 0,
+                     is_schema_db(lex->spname->m_db.str)))
+      break;
+
+    if (lex->spname->m_name.length > NAME_LEN)
+    {
+      my_error(ER_TOO_LONG_IDENT, MYF(0), lex->spname->m_name.str);
+      goto error;
+    }
+    /* TODO : Implement it */
+    send_ok(thd, 1);
+    break;
+  }
   case SQLCOM_CREATE_FUNCTION:                  // UDF function
   {
     if (check_access(thd,INSERT_ACL,"mysql",0,1,0,0))
@@ -4818,6 +4877,12 @@
   case SQLCOM_XA_RECOVER:
     res= mysql_xa_recover(thd);
     break;
+  case SQLCOM_ALTER_TABLESPACE:
+    if (check_access(thd, ALTER_ACL, thd->db, 0, 1, 0, thd->db ? is_schema_db(thd->db) : 0))
+      break;
+    if (!(res= mysql_alter_tablespace(thd, lex->alter_tablespace_info)))
+      send_ok(thd);
+    break;
   case SQLCOM_INSTALL_PLUGIN:
     if (! (res= mysql_install_plugin(thd, &thd->lex->comment,
                                      &thd->lex->ident)))
@@ -5611,6 +5676,12 @@
 	    delete thd->lex->sphead;
 	    thd->lex->sphead= NULL;
 	  }
+          if (thd->lex->et)
+          {
+            thd->lex->et->free_sphead_on_delete= true;
+            delete thd->lex->et;
+            thd->lex->et= NULL;
+          }
 	}
 	else
 	{
@@ -5646,6 +5717,12 @@
 	delete thd->lex->sphead;
 	thd->lex->sphead= NULL;
       }
+      if (thd->lex->et)
+      {
+        thd->lex->et->free_sphead_on_delete= true;
+        delete thd->lex->et;
+        thd->lex->et= NULL;
+      }
     }
     thd->proc_info="freeing items";
     thd->end_statement();
@@ -6550,7 +6627,7 @@
   if (options & REFRESH_HOSTS)
     hostname_cache_refresh();
   if (thd && (options & REFRESH_STATUS))
-    refresh_status();
+    refresh_status(thd);
   if (options & REFRESH_THREADS)
     flush_thread_cache();
 #ifdef HAVE_REPLICATION
@@ -6607,6 +6684,8 @@
   I_List_iterator<THD> it(threads);
   while ((tmp=it++))
   {
+    if (tmp->command == COM_DAEMON)
+      continue;
     if (tmp->thread_id == id)
     {
       pthread_mutex_lock(&tmp->LOCK_delete);	// Lock from delete
@@ -6636,17 +6715,17 @@
 
 /* Clear most status variables */
 
-static void refresh_status(void)
+static void refresh_status(THD *thd)
 {
   pthread_mutex_lock(&LOCK_status);
-  for (SHOW_VAR *ptr= status_vars; ptr->name; ptr++)
-    if (ptr->type == SHOW_LONG) // note that SHOW_LONG_NOFLUSH variables are not reset
-      *(ulong*) ptr->value= 0;
 
   /* We must update the global status before cleaning up the thread */
-  THD *thd= current_thd;
   add_to_status(&global_status_var, &thd->status_var);
   bzero((char*) &thd->status_var, sizeof(thd->status_var));
+
+  for (SHOW_VAR *ptr= status_vars; ptr->name; ptr++)
+    if (ptr->type == SHOW_LONG) // note that SHOW_LONG_NOFLUSH variables are not reset
+      *(ulong*) ptr->value= 0;
 
   /* Reset the counters of all key caches (default and named). */
   process_key_caches(reset_key_cache_counters);

--- 1.438/sql/sql_yacc.yy	2006-01-16 23:37:29 -08:00
+++ 1.439/sql/sql_yacc.yy	2006-01-17 16:39:19 -08:00
@@ -733,7 +733,7 @@
         sp_opt_label BIN_NUM label_ident
 
 %type <lex_str_ptr>
-	opt_table_alias opt_fulltext_parser
+	opt_table_alias opt_fulltext_parser opt_use_partition
 
 %type <table>
 	table_ident table_ident_nodb references xid
@@ -6935,6 +6935,13 @@
 	| CROSS JOIN_SYM	{}
 	;
 
+opt_use_partition:
+	/* empty */	{ $$=0; }
+	| PARTITION_SYM '(' ident ')' 
+		{ $$= &$3; }
+	| SUBPARTITION_SYM '(' ident ')' 
+		{ $$= &$3; };
+		
 /* Warning - may return NULL in case of incomplete SELECT */
 table_factor:
 	{
@@ -6942,15 +6949,17 @@
 	  sel->use_index_ptr=sel->ignore_index_ptr=0;
 	  sel->table_join_options= 0;
 	}
-        table_ident opt_table_alias opt_key_definition
+        table_ident opt_use_partition opt_table_alias opt_key_definition
 	{
 	  LEX *lex= Lex;
+
 	  SELECT_LEX *sel= lex->current_select;
-	  if (!($$= sel->add_table_to_list(lex->thd, $2, $3,
+	  if (!($$= sel->add_table_to_list(lex->thd, $2, $4,
 					   sel->get_table_join_options(),
 					   lex->lock_option,
 					   sel->get_use_index(),
-					   sel->get_ignore_index())))
+					   sel->get_ignore_index(),
+					   0, $3)))
 	    YYABORT;
           sel->add_joined_table($$);
 	}

--- 1.121/sql/table.h	2005-12-31 00:44:55 -08:00
+++ 1.122/sql/table.h	2006-01-17 16:39:19 -08:00
@@ -201,6 +201,8 @@
 #ifdef WITH_PARTITION_STORAGE_ENGINE
   const uchar *partition_info;
   uint  partition_info_len;
+  const uchar *part_state;
+  uint part_state_len;
   handlerton *default_part_db_type;
 #endif
 } TABLE_SHARE;
@@ -333,6 +335,7 @@
   SCH_ENGINES,
   SCH_KEY_COLUMN_USAGE,
   SCH_OPEN_TABLES,
+  SCH_PARTITIONS,
   SCH_PLUGINS,
   SCH_PROCEDURES,
   SCH_SCHEMATA,

--- 1.63/sql/share/errmsg.txt	2005-12-28 14:36:57 -08:00
+++ 1.64/sql/share/errmsg.txt	2006-01-17 16:53:33 -08:00
@@ -5190,8 +5190,8 @@
 	eng "Failed to ALTER %s %s"
 	ger "ALTER %s %s fehlgeschlagen"
 ER_SP_SUBSELECT_NYI 0A000 
-	eng "Subselect value not supported"
+	eng "Subquery value not supported"
 ER_STMT_NOT_ALLOWED_IN_SF_OR_TRG 0A000
         eng "%s is not allowed in stored function or trigger"
 	ger "%s ist in gespeicherten Funktionen und in Triggern nicht erlaubt"
@@ -5601,13 +5601,13 @@
         eng "Recursive limit %d (as set by the max_sp_recursion_depth variable) was exceeded for routine %.64s"
 ER_SP_PROC_TABLE_CORRUPT
-	eng "Failed to load routine %s. The table mysql.proc is missing, corrupt, or contains bad data (internal code %d)"
+	eng "Failed to load routine %-.64s. The table mysql.proc is missing, corrupt, or contains bad data (internal code %d)"
 ER_PARTITION_REQUIRES_VALUES_ERROR
-        eng "%s PARTITIONING requires definition of VALUES %s for each partition"
+        eng "%-.64s PARTITIONING requires definition of VALUES %-.64s for each partition"
 ER_PARTITION_WRONG_VALUES_ERROR
-        eng "Only %s PARTITIONING can use VALUES %s in partition definition"
+        eng "Only %-.64s PARTITIONING can use VALUES %-.64s in partition definition"
 ER_PARTITION_MAXVALUE_ERROR
         eng "MAXVALUE can only be used in last partition definition"
@@ -5636,11 +5636,11 @@
         eng "The partition info in the frm file is not consistent with what can be written into the frm file"
 ER_PARTITION_FUNC_NOT_ALLOWED_ERROR
-        eng "The %s function returns the wrong type"
-        swe "%s-funktionen returnerar felaktig typ"
+        eng "The %-.64s function returns the wrong type"
+        swe "%-.64s-funktionen returnerar felaktig typ"
 ER_PARTITIONS_MUST_BE_DEFINED_ERROR
-        eng "For %s partitions each partition must be defined"
+        eng "For %-.64s partitions each partition must be defined"
 ER_RANGE_NOT_INCREASING_ERROR
         eng "VALUES LESS THAN value must be strictly increasing for each partition"
@@ -5657,8 +5657,8 @@
         eng "The mix of handlers in the partitions is not allowed in this version of MySQL"
 ER_PARTITION_NOT_DEFINED_ERROR
-        eng "For the partitioned engine it is necessary to define all %s"
+        eng "For the partitioned engine it is necessary to define all %-.64s"
 ER_TOO_MANY_PARTITIONS_ERROR
         eng "Too many partitions were defined"
@@ -5671,30 +5671,36 @@
 ER_BLOB_FIELD_IN_PART_FUNC_ERROR
         eng "A BLOB field is not allowed in partition function"
-ER_CHAR_SET_IN_PART_FIELD_ERROR
-        eng "VARCHAR only allowed if binary collation for partition functions"
 ER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF
-        eng "A %s need to include all fields in the partition function"
+        eng "A %-.64s need to include all fields in the partition function"
 ER_NO_PARTS_ERROR
-        eng "Number of %s = 0 is not an allowed value"
+        eng "Number of %-.64s = 0 is not an allowed value"
 ER_PARTITION_MGMT_ON_NONPARTITIONED
         eng "Partition management on a not partitioned table is not possible"
+ER_FOREIGN_KEY_ON_PARTITIONED
+        eng "Foreign key condition is not yet supported in conjunction with partitioning"
 ER_DROP_PARTITION_NON_EXISTENT
-        eng "Error in list of partitions to change"
+        eng "Error in list of partitions to %-.64s"
+        swe "Fel i listan av partitioner att %-.64s"
 ER_DROP_LAST_PARTITION
         eng "Cannot remove all partitions, use DROP TABLE instead"
 ER_COALESCE_ONLY_ON_HASH_PARTITION
         eng "COALESCE PARTITION can only be used on HASH/KEY partitions"
+ER_REORG_HASH_ONLY_ON_SAME_NO
+        eng "REORGANISE PARTITION can only be used to reorganise partitions not to change their numbers"
+ER_REORG_NO_PARAM_ERROR
+        eng "REORGANISE PARTITION without parameters can only be used on auto-partitioned tables using HASH PARTITIONs"
 ER_ONLY_ON_RANGE_LIST_PARTITION
-        eng "%s PARTITION can only be used on RANGE/LIST partitions"
+        eng "%-.64s PARTITION can only be used on RANGE/LIST partitions"
 ER_ADD_PARTITION_SUBPART_ERROR
         eng "Trying to Add partition(s) with wrong number of subpartitions"
         swe "ADD PARTITION med fel antal subpartitioner"
@@ -5708,25 +5714,37 @@
         eng "More partitions to reorganise than there are partitions"
 ER_SAME_NAME_PARTITION
-        eng "All partitions must have unique names in the table"
+        eng "Duplicate partition name %-.64s"
+        swe "Duplicerat partitionsnamn %-.64s"
+ER_NO_BINLOG_ERROR
+        eng "It is not allowed to shut off binlog on this command"
 ER_CONSECUTIVE_REORG_PARTITIONS
         eng "When reorganising a set of partitions they must be in consecutive order"
 ER_REORG_OUTSIDE_RANGE
-        eng "The new partitions cover a bigger range then the reorganised partitions do"
-ER_DROP_PARTITION_FAILURE
-        eng "Drop partition not supported in this version for this handler"
-ER_DROP_PARTITION_WHEN_FK_DEFINED
-        eng "Cannot drop a partition when a foreign key constraint is defined on the table"
+        eng "Reorganize of range partitions cannot change total ranges except for last partition where it can extend the range"
+ER_PARTITION_FUNCTION_FAILURE
+        eng "Partition function not supported in this version for this handler"
+ER_PART_STATE_ERROR
+        eng "Partition state cannot be defined from CREATE/ALTER TABLE"
+ER_LIMITED_PART_RANGE
+        eng "The %-.64s handler only supports 32 bit integers in VALUES"
 ER_PLUGIN_IS_NOT_LOADED
 	eng "Plugin '%-.64s' is not loaded"
 ER_WRONG_VALUE
 	eng "Incorrect %-.32s value: '%-.128s'"
 ER_NO_PARTITION_FOR_GIVEN_VALUE
 	eng "Table has no partition for value %ld"
+ER_BINLOG_ROW_LOGGING_FAILED
+	eng "Writing one row to the row-based binary log failed"
+ER_BINLOG_ROW_WRONG_TABLE_DEF
+	eng "Table definition on master and slave does not match"
+ER_BINLOG_ROW_RBR_TO_SBR
+	eng "Slave running with --log-slave-updates must use row-based binary logging to be able to replicate row-based binary log events"
 ER_NO_SUCH_PARTITION 
 	cze "partion '%-.64s.%s' neexistuje"
 	dan "partition '%-.64s' eksisterer ikke"
@@ -5750,9 +5768,58 @@
 	spa "Particion '%-.64s' no existe"
 	swe "Det finns ingen partition som heter '%-.64s'"
+ER_TABLESPACE_OPTION_ONLY_ONCE
+        eng "It is not allowed to specify %s more than once"
+ER_CREATE_TABLESPACE_FAILED
+        eng "Failed to create %s"
+ER_DROP_TABLESPACE_FAILED
+        eng "Failed to drop %s"
+ER_TABLESPACE_AUTO_EXTEND_ERROR
+        eng "The handler doesn't support autoextend of tablespaces"
+ER_WRONG_SIZE_NUMBER
+        eng "A size parameter was incorrectly specified, either number or on the form 10M"
+ER_SIZE_OVERFLOW_ERROR
+        eng "The size number was correct but we don't allow the digit part to be more than 2 billion"
+ER_ALTER_TABLESPACE_FAILED
+        eng "Failed to alter: %s"
 ER_BINLOG_ROW_LOGGING_FAILED
 	eng "Writing one row to the row-based binary log failed"
 ER_BINLOG_ROW_WRONG_TABLE_DEF
 	eng "Table definition on master and slave does not match"
 ER_BINLOG_ROW_RBR_TO_SBR
 	eng "Slave running with --log-slave-updates must use row-based binary logging to be able to replicate row-based binary log events"
+ER_EVENT_ALREADY_EXISTS
+        eng "Event '%-.64s' already exists"
+ER_EVENT_STORE_FAILED
+        eng "Failed to store event %s. Error code %d from storage engine."
+ER_EVENT_DOES_NOT_EXIST
+        eng "Unknown event '%-.64s'"
+ER_EVENT_CANT_ALTER
+        eng "Failed to alter event '%-.64s'"
+ER_EVENT_DROP_FAILED
+        eng "Failed to drop %s"
+ER_EVENT_INTERVAL_NOT_POSITIVE
+        eng "INTERVAL must be positive"
+ER_EVENT_ENDS_BEFORE_STARTS
+        eng "ENDS must be after STARTS"
+ER_EVENT_EXEC_TIME_IN_THE_PAST
+        eng "Activation (AT) time is in the past"
+ER_EVENT_OPEN_TABLE_FAILED
+        eng "Failed to open mysql.event"
+ER_EVENT_NEITHER_M_EXPR_NOR_M_AT
+        eng "No datetime expression provided"
+ER_EVENT_COL_COUNT_DOESNT_MATCH
+        eng "Column count of %s.%s is wrong. Table probably corrupted"
+ER_EVENT_CANNOT_LOAD_FROM_TABLE
+        eng "Cannot load from mysql.event. Table probably corrupted"
+ER_EVENT_CANNOT_DELETE
+        eng "Failed to delete the event from mysql.event"
+ER_EVENT_COMPILE_ERROR
+        eng "Error during compilation of event's body"
+ER_EVENT_SAME_NAME
+        eng "Same old and new event name"
+ER_EVENT_DATA_TOO_LONG
+        eng "Data for column '%s' too long"
+ER_DROP_INDEX_FK
+        eng "Cannot drop index '%-.64s': needed in a foreign key constraint"

--- 1.30/sql/ha_partition.cc	2006-01-17 16:36:21 -08:00
+++ 1.31/sql/ha_partition.cc	2006-01-17 16:53:33 -08:00
@@ -67,6 +67,8 @@
 ****************************************************************************/
 
 static handler *partition_create_handler(TABLE_SHARE *share);
+static uint partition_flags();
+static uint alter_table_flags(uint flags);
 
 handlerton partition_hton = {
   MYSQL_HANDLERTON_INTERFACE_VERSION,
@@ -96,14 +98,68 @@
   NULL, /* Start Consistent Snapshot */
   NULL, /* Flush logs */
   NULL, /* Show status */
+  partition_flags, /* Partition flags */
+  alter_table_flags, /* Alter table flags */
+  NULL, /* Alter Tablespace */
   HTON_NOT_USER_SELECTABLE | HTON_HIDDEN
 };
 
+/*
+  Create new partition handler
+
+  SYNOPSIS
+    partition_create_handler()
+    share                       Table share object
+
+  RETURN VALUE
+    New partition object
+*/
+
 static handler *partition_create_handler(TABLE_SHARE *share)
 {
   return new ha_partition(share);
 }
 
+/*
+  HA_CAN_PARTITION:
+  Used by storage engines that can handle partitioning without this
+  partition handler
+  (Partition, NDB)
+
+  HA_CAN_UPDATE_PARTITION_KEY:
+  Set if the handler can update fields that are part of the partition
+  function.
+
+  HA_CAN_PARTITION_UNIQUE:
+  Set if the handler can handle unique indexes where the fields of the
+  unique key are not part of the fields of the partition function. Thus
+  a unique key can be set on all fields.
+
+  HA_USE_AUTO_PARTITION
+  Set if the handler sets all tables to be partitioned by default.
+*/
+
+static uint partition_flags()
+{
+  return HA_CAN_PARTITION;
+}
+
+static uint alter_table_flags(uint flags __attribute__((unused)))
+{
+  return (HA_PARTITION_FUNCTION_SUPPORTED |
+          HA_FAST_CHANGE_PARTITION);
+}
+
+/*
+  Constructor method
+
+  SYNOPSIS
+    ha_partition()
+    share                       Table share object
+
+  RETURN VALUE
+    NONE
+*/
 
 ha_partition::ha_partition(TABLE_SHARE *share)
   :handler(&partition_hton, share), m_part_info(NULL), m_create_handler(FALSE),
@@ -115,6 +171,17 @@
 }
 
 
+/*
+  Constructor method
+
+  SYNOPSIS
+    ha_partition()
+    part_info                       Partition info
+
+  RETURN VALUE
+    NONE
+*/
+
 ha_partition::ha_partition(partition_info *part_info)
   :handler(&partition_hton, NULL), m_part_info(part_info),
    m_create_handler(TRUE),
@@ -128,13 +195,28 @@
 }
 
 
+/*
+  Initialise handler object
+
+  SYNOPSIS
+    init_handler_variables()
+
+  RETURN VALUE
+    NONE
+*/
+
 void ha_partition::init_handler_variables()
 {
   active_index= MAX_KEY;
+  m_mode= 0;
+  m_open_test_lock= 0;
   m_file_buffer= NULL;
   m_name_buffer_ptr= NULL;
   m_engine_array= NULL;
   m_file= NULL;
+  m_reorged_file= NULL;
+  m_reorged_parts= 0;
+  m_added_file= NULL;
   m_tot_parts= 0;
   m_has_transactions= 0;
   m_pkey_is_clustered= 0;
@@ -172,6 +254,16 @@
 }
 
 
+/*
+  Destructor method
+
+  SYNOPSIS
+    ~ha_partition()
+
+  RETURN VALUE
+    NONE
+*/
+
 ha_partition::~ha_partition()
 {
   DBUG_ENTER("ha_partition::~ha_partition()");
@@ -189,6 +281,17 @@
 
 
 /*
+  Initialise partition handler object
+
+  SYNOPSIS
+    ha_initialise()
+
+  RETURN VALUE
+    1                         Error
+    0                         Success
+
+  DESCRIPTION
+
   The partition handler is only a layer on top of other engines. Thus it
   can't really perform anything without the underlying handlers. Thus we
   add this method as part of the allocation of a handler object.
@@ -218,6 +321,7 @@
      sort will be performed using the underlying handlers.
   5) primary_key_is_clustered, has_transactions and low_byte_first is
      calculated here.
+
 */
 
 int ha_partition::ha_initialise()
@@ -244,7 +348,7 @@
   }
   else if (get_from_handler_file(table_share->normalized_path.str))
   {
-    my_error(ER_OUTOFMEMORY, MYF(0), 129); //Temporary fix TODO print_error
+    mem_alloc_error(2);
     DBUG_RETURN(1);
   }
   /*
@@ -289,47 +393,119 @@
                 MODULE meta data changes
 ****************************************************************************/
 /*
-  This method is used to calculate the partition name, service routine to
-  the del_ren_cre_table method.
-*/
+  Create partition names
 
-static void create_partition_name(char *out, const char *in1, const char *in2)
+  SYNOPSIS
+    create_partition_name()
+    out:out                   Created partition name string
+    in1                       First part
+    in2                       Second part
+    name_variant              Normal, temporary or renamed partition name
+
+  RETURN VALUE
+    NONE
+
+  DESCRIPTION
+    This method is used to calculate the partition name, service routine to
+    the del_ren_cre_table method.
+*/
+
+#define NORMAL_PART_NAME 0
+#define TEMP_PART_NAME 1
+#define RENAMED_PART_NAME 2
+static void create_partition_name(char *out, const char *in1,
+                                  const char *in2, uint name_variant,
+                                  bool translate)
 {
-  strxmov(out, in1, "_", in2, NullS);
+  char transl_part_name[FN_REFLEN];
+  const char *transl_part;
+
+  if (translate)
+  {
+    tablename_to_filename(in2, transl_part_name, FN_REFLEN);
+    transl_part= transl_part_name;
+  }
+  else
+    transl_part= in2;
+  if (name_variant == NORMAL_PART_NAME)
+    strxmov(out, in1, "#P#", transl_part, NullS);
+  else if (name_variant == TEMP_PART_NAME)
+    strxmov(out, in1, "#P#", transl_part, "#TMP#", NullS);
+  else if (name_variant == RENAMED_PART_NAME)
+    strxmov(out, in1, "#P#", transl_part, "#REN#", NullS);
 }
 
 /*
-  This method is used to calculate the partition name, service routine to
+  Create subpartition name
+
+  SYNOPSIS
+    create_subpartition_name()
+    out:out                   Created partition name string
+    in1                       First part
+    in2                       Second part
+    in3                       Third part
+    name_variant              Normal, temporary or renamed partition name
+
+  RETURN VALUE
+    NONE
+
+  DESCRIPTION
+  This method is used to calculate the subpartition name, service routine to
   the del_ren_cre_table method.
 */
 
 static void create_subpartition_name(char *out, const char *in1,
-                                     const char *in2, const char *in3)
+                                     const char *in2, const char *in3,
+                                     uint name_variant)
 {
-  strxmov(out, in1, "_", in2, "_", in3, NullS);
+  char transl_part_name[FN_REFLEN], transl_subpart_name[FN_REFLEN];
+
+  tablename_to_filename(in2, transl_part_name, FN_REFLEN);
+  tablename_to_filename(in3, transl_subpart_name, FN_REFLEN);
+  if (name_variant == NORMAL_PART_NAME)
+    strxmov(out, in1, "#P#", transl_part_name,
+            "#SP#", transl_subpart_name, NullS);
+  else if (name_variant == TEMP_PART_NAME)
+    strxmov(out, in1, "#P#", transl_part_name,
+            "#SP#", transl_subpart_name, "#TMP#", NullS);
+  else if (name_variant == RENAMED_PART_NAME)
+    strxmov(out, in1, "#P#", transl_part_name,
+            "#SP#", transl_subpart_name, "#REN#", NullS);
 }
 
 
 /*
-  Used to delete a table. By the time delete_table() has been called all
-  opened references to this table will have been closed (and your globally
-  shared references released. The variable name will just be the name of
-  the table. You will need to remove any files you have created at this
-  point.
+  Delete a table
 
-  If you do not implement this, the default delete_table() is called from
-  handler.cc and it will delete all files with the file extentions returned
-  by bas_ext().
+  SYNOPSIS
+    delete_table()
+    name                    Full path of table name
 
-  Called from handler.cc by delete_table and  ha_create_table(). Only used
-  during create if the table_flag HA_DROP_BEFORE_CREATE was specified for
-  the storage engine.
+  RETURN VALUE
+    >0                        Error
+    0                         Success
+
+  DESCRIPTION
+    Used to delete a table. By the time delete_table() has been called all
+    opened references to this table will have been closed (and your globally
+    shared references released). The variable name will just be the name of
+    the table. You will need to remove any files you have created at this
+    point.
+
+    If you do not implement this, the default delete_table() is called from
+    handler.cc and it will delete all files with the file extensions returned
+    by bas_ext().
+
+    Called from handler.cc by delete_table and  ha_create_table(). Only used
+    during create if the table_flag HA_DROP_BEFORE_CREATE was specified for
+    the storage engine.
 */
 
 int ha_partition::delete_table(const char *name)
 {
   int error;
   DBUG_ENTER("ha_partition::delete_table");
+
   if ((error= del_ren_cre_table(name, NULL, NULL, NULL)))
     DBUG_RETURN(error);
   DBUG_RETURN(handler::delete_table(name));
@@ -337,19 +513,32 @@
 
 
 /*
-  Renames a table from one name to another from alter table call.
+  Rename a table
 
-  If you do not implement this, the default rename_table() is called from
-  handler.cc and it will delete all files with the file extentions returned
-  by bas_ext().
+  SYNOPSIS
+    rename_table()
+    from                      Full path of old table name
+    to                        Full path of new table name
+
+  RETURN VALUE
+    >0                        Error
+    0                         Success
+
+  DESCRIPTION
+    Renames a table from one name to another from alter table call.
+
+    If you do not implement this, the default rename_table() is called from
+    handler.cc and it will rename all files with the file extensions returned
+    by bas_ext().
 
-  Called from sql_table.cc by mysql_rename_table().
+    Called from sql_table.cc by mysql_rename_table().
 */
 
 int ha_partition::rename_table(const char *from, const char *to)
 {
   int error;
   DBUG_ENTER("ha_partition::rename_table");
+
   if ((error= del_ren_cre_table(from, to, NULL, NULL)))
     DBUG_RETURN(error);
   DBUG_RETURN(handler::rename_table(from, to));
@@ -357,11 +546,22 @@
 
 
 /*
-  create_handler_files is called to create any handler specific files
-  before opening the file with openfrm to later call ::create on the
-  file object.
-  In the partition handler this is used to store the names of partitions
-  and types of engines in the partitions.
+  Create the handler file (.par-file)
+
+  SYNOPSIS
+    create_handler_files()
+    name                              Full path of table name
+
+  RETURN VALUE
+    >0                        Error
+    0                         Success
+
+  DESCRIPTION
+    create_handler_files is called to create any handler specific files
+    before opening the file with openfrm to later call ::create on the
+    file object.
+    In the partition handler this is used to store the names of partitions
+    and types of engines in the partitions.
 */
 
 int ha_partition::create_handler_files(const char *name)
@@ -372,7 +572,6 @@
     We need to update total number of parts since we might write the handler
     file as part of a partition management command
   */
-  m_tot_parts= get_tot_partitions(m_part_info);
   if (create_handler_file(name))
   {
     my_error(ER_CANT_CREATE_HANDLER_FILE, MYF(0));
@@ -383,14 +582,27 @@
 
 
 /*
-  create() is called to create a table. The variable name will have the name
-  of the table. When create() is called you do not need to worry about
-  opening the table. Also, the FRM file will have already been created so
-  adjusting create_info will not do you any good. You can overwrite the frm
-  file at this point if you wish to change the table definition, but there
-  are no methods currently provided for doing that.
+  Create a partitioned table
+
+  SYNOPSIS
+    create()
+    name                              Full path of table name
+    table_arg                         Table object
+    create_info                       Create info generated for CREATE TABLE
+
+  RETURN VALUE
+    >0                        Error
+    0                         Success
+
+  DESCRIPTION
+    create() is called to create a table. The variable name will have the name
+    of the table. When create() is called you do not need to worry about
+    opening the table. Also, the FRM file will have already been created so
+    adjusting create_info will not do you any good. You can overwrite the frm
+    file at this point if you wish to change the table definition, but there
+    are no methods currently provided for doing that.
 
-  Called from handle.cc by ha_create_table().
+    Called from handler.cc by ha_create_table().
 */
 
 int ha_partition::create(const char *name, TABLE *table_arg,
@@ -409,23 +621,838 @@
   DBUG_RETURN(0);
 }
 
+
+/*
+  Drop partitions as part of ALTER TABLE of partitions
+
+  SYNOPSIS
+    drop_partitions()
+    path                        Complete path of db and table name
+
+  RETURN VALUE
+    >0                          Failure
+    0                           Success
+
+  DESCRIPTION
+    Use part_info object on handler object to deduce which partitions to
+    drop (each partition has a state attached to it)
+*/
+
int ha_partition::drop_partitions(const char *path)
{
  List_iterator<partition_element> part_it(m_part_info->partitions);
+  List_iterator<partition_element> temp_it(m_part_info->temp_partitions);
  char part_name_buff[FN_REFLEN];
+  uint no_parts= m_part_info->partitions.elements;
+  uint part_count= 0;
+  uint no_subparts= m_part_info->no_subparts;
+  uint i= 0;
+  uint name_variant;
+  int  error= 1; /* NOTE(review): stays 1 (failure) if no partition matches -- confirm callers guarantee at least one */
+  bool reorged_parts= (m_reorged_parts > 0); /* REORGANIZE: drop through m_reorged_file instead of m_file */
+  bool temp_partitions= (m_part_info->temp_partitions.elements > 0);
+  DBUG_ENTER("ha_partition::drop_partitions");
+
+  if (temp_partitions)
+    no_parts= m_part_info->temp_partitions.elements;
+  do
+  {
+    partition_element *part_elem;
+    if (temp_partitions)
+    {
+      /*
+        We need to remove the reorganised partitions that were put in the
+        temp_partitions-list.
+      */
+      part_elem= temp_it++;
+      DBUG_ASSERT(part_elem->part_state == PART_TO_BE_DROPPED);
+    }
+    else
+      part_elem= part_it++;
+    if (part_elem->part_state == PART_TO_BE_DROPPED ||
+        part_elem->part_state == PART_IS_CHANGED)
+    {
+      handler *file;
+      /*
+        This part is to be dropped, meaning the part or all its subparts.
+      */
+      name_variant= NORMAL_PART_NAME;
+      if (part_elem->part_state == PART_IS_CHANGED ||
+          (part_elem->part_state == PART_TO_BE_DROPPED && temp_partitions))
+        name_variant= RENAMED_PART_NAME;
+      if (m_is_sub_partitioned)
+      {
+        List_iterator<partition_element> sub_it(part_elem->subpartitions);
+        uint j= 0, part;
+        do
+        {
+          partition_element *sub_elem= sub_it++;
+          part= i * no_subparts + j;
+          create_subpartition_name(part_name_buff, path,
+                                   part_elem->partition_name,
+                                   sub_elem->partition_name, name_variant);
+          if (reorged_parts)
+            file= m_reorged_file[part_count++];
+          else
+            file= m_file[part];
+          DBUG_PRINT("info", ("Drop subpartition %s", part_name_buff));
+          error= file->delete_table((const char *) part_name_buff); /* NOTE(review): overwrites any error from a previous iteration */
+        } while (++j < no_subparts);
+      }
+      else
+      {
+        create_partition_name(part_name_buff, path,
+                              part_elem->partition_name, name_variant,
+                              TRUE);
+        if (reorged_parts)
+          file= m_reorged_file[part_count++];
+        else
+          file= m_file[i];
+        DBUG_PRINT("info", ("Drop partition %s", part_name_buff));
+        error= file->delete_table((const char *) part_name_buff); /* NOTE(review): overwrites any error from a previous iteration */
+      }
+      if (part_elem->part_state == PART_IS_CHANGED)
+        part_elem->part_state= PART_NORMAL;
+      else
+        part_elem->part_state= PART_IS_DROPPED;
+    }
+  } while (++i < no_parts);
+  DBUG_RETURN(error);
+}
+
+
+/*
+  Rename partitions as part of ALTER TABLE of partitions
+
+  SYNOPSIS
+    rename_partitions()
+    path                        Complete path of db and table name
+
+  RETURN VALUE
+    TRUE                        Failure
+    FALSE                       Success
+
+  DESCRIPTION
+    When reorganising partitions, adding hash partitions and coalescing
+    partitions it can be necessary to rename partitions while holding
+    an exclusive lock on the table.
+    Which partitions to rename is given by state of partitions found by the
+    partition info struct referenced from the handler object
+*/
+
+int ha_partition::rename_partitions(const char *path)
+{
+  List_iterator<partition_element> part_it(m_part_info->partitions);
+  List_iterator<partition_element> temp_it(m_part_info->temp_partitions);
+  char part_name_buff[FN_REFLEN];
+  char norm_name_buff[FN_REFLEN];
+  uint no_parts= m_part_info->partitions.elements;
+  uint part_count= 0;
+  uint no_subparts= m_part_info->no_subparts;
+  uint i= 0;
+  uint j= 0;
+  int error= 1;
+  uint temp_partitions= m_part_info->temp_partitions.elements;
+  handler *file;
+  partition_element *part_elem, *sub_elem;
+  DBUG_ENTER("ha_partition::rename_partitions");
+
+  if (temp_partitions)
+  {
+    do
+    {
+      part_elem= temp_it++;
+      if (m_is_sub_partitioned)
+      {
+        List_iterator<partition_element> sub_it(part_elem->subpartitions);
+        j= 0; /* BUG FIX: reset subpartition index for every partition (was set only once, so the 2nd and later partitions used stale j) */
+        do {
+          sub_elem= sub_it++;
+          file= m_reorged_file[part_count++];
+          create_subpartition_name(part_name_buff, path,
+                                   part_elem->partition_name,
+                                   sub_elem->partition_name,
+                                   RENAMED_PART_NAME);
+          create_subpartition_name(norm_name_buff, path,
+                                   part_elem->partition_name,
+                                   sub_elem->partition_name,
+                                   NORMAL_PART_NAME);
+          DBUG_PRINT("info", ("Rename subpartition from %s to %s",
+                     norm_name_buff, part_name_buff));
+          error= file->rename_table((const char *) norm_name_buff,
+                                    (const char *) part_name_buff);
+        } while (++j < no_subparts);
+      }
+      else
+      {
+        file= m_reorged_file[part_count++];
+        create_partition_name(part_name_buff, path,
+                              part_elem->partition_name, RENAMED_PART_NAME,
+                              TRUE);
+        create_partition_name(norm_name_buff, path,
+                              part_elem->partition_name, NORMAL_PART_NAME,
+                              TRUE);
+        DBUG_PRINT("info", ("Rename partition from %s to %s",
+                   norm_name_buff, part_name_buff));
+        error= file->rename_table((const char *) norm_name_buff,
+                                  (const char *) part_name_buff);
+      }
+    } while (++i < temp_partitions);
+  }
+  i= 0;
+  do
+  {
+    part_elem= part_it++;
+    if (part_elem->part_state == PART_IS_CHANGED ||
+        (part_elem->part_state == PART_IS_ADDED && temp_partitions))
+    {
+      if (m_is_sub_partitioned)
+      {
+        List_iterator<partition_element> sub_it(part_elem->subpartitions);
+        uint part;
+
+        j= 0;
+        do
+        {
+          sub_elem= sub_it++;
+          part= i * no_subparts + j;
+          create_subpartition_name(norm_name_buff, path,
+                                   part_elem->partition_name,
+                                   sub_elem->partition_name,
+                                   NORMAL_PART_NAME);
+          if (part_elem->part_state == PART_IS_CHANGED)
+          {
+            file= m_reorged_file[part_count++];
+            create_subpartition_name(part_name_buff, path,
+                                     part_elem->partition_name,
+                                     sub_elem->partition_name,
+                                     RENAMED_PART_NAME);
+            DBUG_PRINT("info", ("Rename subpartition from %s to %s",
+                       norm_name_buff, part_name_buff));
+            error= file->rename_table((const char *) norm_name_buff,
+                                      (const char *) part_name_buff);
+          }
+          file= m_new_file[part];
+          create_subpartition_name(part_name_buff, path,
+                                   part_elem->partition_name,
+                                   sub_elem->partition_name,
+                                   TEMP_PART_NAME);
+          DBUG_PRINT("info", ("Rename subpartition from %s to %s",
+                     part_name_buff, norm_name_buff));
+          error= file->rename_table((const char *) part_name_buff,
+                                    (const char *) norm_name_buff);
+        } while (++j < no_subparts);
+      }
+      else
+      {
+        create_partition_name(norm_name_buff, path,
+                              part_elem->partition_name, NORMAL_PART_NAME,
+                              TRUE);
+        if (part_elem->part_state == PART_IS_CHANGED)
+        {
+          file= m_reorged_file[part_count++];
+          create_partition_name(part_name_buff, path,
+                                part_elem->partition_name, RENAMED_PART_NAME,
+                                TRUE);
+          DBUG_PRINT("info", ("Rename partition from %s to %s",
+                     norm_name_buff, part_name_buff));
+          error= file->rename_table((const char *) norm_name_buff,
+                                    (const char *) part_name_buff);
+        }
+        file= m_new_file[i];
+        create_partition_name(part_name_buff, path,
+                              part_elem->partition_name, TEMP_PART_NAME,
+                              TRUE);
+        DBUG_PRINT("info", ("Rename partition from %s to %s",
+                   part_name_buff, norm_name_buff));
+        error= file->rename_table((const char *) part_name_buff,
+                                  (const char *) norm_name_buff);
+      }
+    }
+  } while (++i < no_parts);
+  DBUG_RETURN(error);
+}
+
+
+#define OPTIMIZE_PARTS 1
+#define ANALYZE_PARTS 2
+#define CHECK_PARTS   3
+#define REPAIR_PARTS 4
+
+/*
+  Optimize table
+
+  SYNOPSIS
+    optimize()
+    thd               Thread object
+    check_opt         Check/analyze/repair/optimize options
+
+  RETURN VALUES
+    >0                Error
+    0                 Success
+*/
+
+int ha_partition::optimize(THD *thd, HA_CHECK_OPT *check_opt)
+{
+  DBUG_ENTER("ha_partition::optimize");
+
+  DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt, 
+                                    OPTIMIZE_PARTS, TRUE));
+}
+
+
+/*
+  Analyze table
+
+  SYNOPSIS
+    analyze()
+    thd               Thread object
+    check_opt         Check/analyze/repair/optimize options
+
+  RETURN VALUES
+    >0                Error
+    0                 Success
+*/
+
+int ha_partition::analyze(THD *thd, HA_CHECK_OPT *check_opt)
+{
+  DBUG_ENTER("ha_partition::analyze");
+
+  DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt, 
+                                    ANALYZE_PARTS, TRUE));
+}
+
+
+/*
+  Check table
+
+  SYNOPSIS
+    check()
+    thd               Thread object
+    check_opt         Check/analyze/repair/optimize options
+
+  RETURN VALUES
+    >0                Error
+    0                 Success
+*/
+
+int ha_partition::check(THD *thd, HA_CHECK_OPT *check_opt)
+{
+  DBUG_ENTER("ha_partition::check");
+
+  DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt, 
+                                    CHECK_PARTS, TRUE));
+}
+
+
+/*
+  Repair table
+
+  SYNOPSIS
+    repair()
+    thd               Thread object
+    check_opt         Check/analyze/repair/optimize options
+
+  RETURN VALUES
+    >0                Error
+    0                 Success
+*/
+
+int ha_partition::repair(THD *thd, HA_CHECK_OPT *check_opt)
+{
+  DBUG_ENTER("ha_partition::repair");
+
+  DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt, 
+                                    REPAIR_PARTS, TRUE));
+}
+
+/*
+  Optimize partitions
+
+  SYNOPSIS
+    optimize_partitions()
+    thd                   Thread object
+  RETURN VALUE
+    >0                        Failure
+    0                         Success
+  DESCRIPTION
+    Call optimize on each partition marked with partition state PART_CHANGED
+*/
+
+int ha_partition::optimize_partitions(THD *thd)
+{
+  DBUG_ENTER("ha_partition::optimize_partitions");
+
+  DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt, 
+                                    OPTIMIZE_PARTS, FALSE));
+}
+
+/*
+  Analyze partitions
+
+  SYNOPSIS
+    analyze_partitions()
+    thd                   Thread object
+  RETURN VALUE
+    >0                        Failure
+    0                         Success
+  DESCRIPTION
+    Call analyze on each partition marked with partition state PART_CHANGED
+*/
+
+int ha_partition::analyze_partitions(THD *thd)
+{
+  DBUG_ENTER("ha_partition::analyze_partitions");
+
+  DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt, 
+                                    ANALYZE_PARTS, FALSE));
+}
+
+/*
+  Check partitions
+
+  SYNOPSIS
+    check_partitions()
+    thd                   Thread object
+  RETURN VALUE
+    >0                        Failure
+    0                         Success
+  DESCRIPTION
+    Call check on each partition marked with partition state PART_CHANGED
+*/
+
+int ha_partition::check_partitions(THD *thd)
+{
+  DBUG_ENTER("ha_partition::check_partitions");
+
+  DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt, 
+                                    CHECK_PARTS, FALSE));
+}
+
+/*
+  Repair partitions
+
+  SYNOPSIS
+    repair_partitions()
+    thd                   Thread object
+  RETURN VALUE
+    >0                        Failure
+    0                         Success
+  DESCRIPTION
+    Call repair on each partition marked with partition state PART_CHANGED
+*/
+
+int ha_partition::repair_partitions(THD *thd)
+{
+  DBUG_ENTER("ha_partition::repair_partitions");
+
+  DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt, 
+                                    REPAIR_PARTS, FALSE));
+}
+
+
+/*
+  Handle optimize/analyze/check/repair of one partition
+
+  SYNOPSIS
+    handle_opt_part()
+    thd                      Thread object
+    check_opt                Options
+    file                     Handler object of partition
+    flag                     Optimize/Analyze/Check/Repair flag
+
+  RETURN VALUE
+    >0                        Failure
+    0                         Success
+*/
+
+static int handle_opt_part(THD *thd, HA_CHECK_OPT *check_opt,
+                           handler *file, uint flag)
+{
+  int error;
+  DBUG_ENTER("handle_opt_part");
+  DBUG_PRINT("enter", ("flag = %u", flag));
+
+  if (flag == OPTIMIZE_PARTS)
+    error= file->optimize(thd, check_opt);
+  else if (flag == ANALYZE_PARTS)
+    error= file->analyze(thd, check_opt);
+  else if (flag == CHECK_PARTS)
+    error= file->check(thd, check_opt);
+  else if (flag == REPAIR_PARTS)
+    error= file->repair(thd, check_opt);
+  else
+  {
+    DBUG_ASSERT(FALSE);
+    error= 1;
+  }
+  if (error == HA_ADMIN_ALREADY_DONE)
+    error= 0;
+  DBUG_RETURN(error);
+}
+
+
+/*
+  Handle optimize/analyze/check/repair of partitions
+
+  SYNOPSIS
+    handle_opt_partitions()
+    thd                      Thread object
+    check_opt                Options
+    flag                     Optimize/Analyze/Check/Repair flag
+    all_parts                All partitions or only a subset
+
+  RETURN VALUE
+    >0                        Failure
+    0                         Success
+*/
+
+int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt,
+                                        uint flag, bool all_parts)
+{
+  List_iterator<partition_element> part_it(m_part_info->partitions);
   uint no_parts= m_part_info->no_parts;
-  uint no_subparts= m_part_info->no_subparts, i= 0;
+  uint no_subparts= m_part_info->no_subparts;
+  uint i= 0;
+  LEX *lex= thd->lex;
+  int error;
+  DBUG_ENTER("ha_partition::handle_opt_partitions");
+  DBUG_PRINT("enter", ("all_parts %u, flag= %u", all_parts, flag));
+
+  do
+  {
+    partition_element *part_elem= part_it++;
+    if (all_parts || part_elem->part_state == PART_CHANGED)
+    {
+      handler *file;
+      if (m_is_sub_partitioned)
+      {
+        List_iterator<partition_element> sub_it(part_elem->subpartitions);
+        uint j= 0, part;
+        do
+        {
+          partition_element *sub_elem= sub_it++;
+          part= i * no_subparts + j;
+          DBUG_PRINT("info", ("Optimize subpartition %u",
+                     part));
+          if ((error= handle_opt_part(thd, check_opt, m_file[part], flag)))
+          {
+            my_error(ER_GET_ERRNO, MYF(0), error);
+            DBUG_RETURN(TRUE);
+          }
+        } while (++j < no_subparts);
+      }
+      else
+      {
+        DBUG_PRINT("info", ("Optimize partition %u", i));
+        if ((error= handle_opt_part(thd, check_opt, m_file[i], flag)))
+        {
+          my_error(ER_GET_ERRNO, MYF(0), error);
+          DBUG_RETURN(TRUE);
+        }
+      }
+    }
+  } while (++i < no_parts);
+  DBUG_RETURN(FALSE);
+}
+
+/*
+  Prepare by creating a new partition
+
+  SYNOPSIS
+    prepare_new_partition()
+    table                      Table object
+    create_info                Create info from CREATE TABLE
+    file                       Handler object of new partition
+    part_name                  partition name
+
+  RETURN VALUE
+    >0                         Error
+    0                          Success
+*/
+
+int ha_partition::prepare_new_partition(TABLE *table,
+                                        HA_CREATE_INFO *create_info,
+                                        handler *file, const char *part_name)
+{
+  int error;
+  bool create_flag= FALSE;
+  /* Fix: removed unused local 'open_flag'; DBUG_ENTER tag now matches the ha_partition:: convention of the sibling methods */
+  DBUG_ENTER("ha_partition::prepare_new_partition");
+
+  if ((error= file->create(part_name, table, create_info)))
+    goto error;
+  create_flag= TRUE;
+  if ((error= file->ha_open(table, part_name, m_mode, m_open_test_lock)))
+    goto error;
+  if ((error= file->external_lock(current_thd, m_lock_type)))
+    goto error;
+
+  DBUG_RETURN(0);
+error:
+  if (create_flag)
+    VOID(file->delete_table(part_name));
+  print_error(error, MYF(0));
+  DBUG_RETURN(error);
+}
+
+
+/*
+  Cleanup by removing all created partitions after error
+
+  SYNOPSIS
+    cleanup_new_partition()
+    part_count             Number of partitions to remove
+
+  RETURN VALUE
+    NONE
+
+  DESCRIPTION
+  TODO:
+    We must ensure that in the case that we get an error during the process
+    that we call external_lock with F_UNLCK, close the table and delete the
+    table in the case where we have been successful with prepare_handler.
+    We solve this by keeping an array of successful calls to prepare_handler
+    which can then be used to undo the call.
+*/
+
+void ha_partition::cleanup_new_partition(uint part_count)
+{
+  handler **save_m_file= m_file;
+  DBUG_ENTER("ha_partition::cleanup_new_partition");
+
+  if (m_added_file && m_added_file[0])
+  {
+    m_file= m_added_file;
+    m_added_file= NULL;
+
+    external_lock(current_thd, F_UNLCK);
+    /* delete_table also needed, a bit more complex */
+    close();
+
+    m_added_file= m_file;
+    m_file= save_m_file;
+  }
+  DBUG_VOID_RETURN;
+}
+
+/*
+  Implement the partition changes defined by ALTER TABLE of partitions
+
+  SYNOPSIS
+    change_partitions()
+    create_info                 HA_CREATE_INFO object describing all
+                                fields and indexes in table
+    path                        Complete path of db and table name
+    out: copied                 Output parameter where number of copied
+                                records are added
+    out: deleted                Output parameter where number of deleted
+                                records are added
+    pack_frm_data               Reference to packed frm file
+    pack_frm_len                Length of packed frm file
+
+  RETURN VALUE
+    >0                        Failure
+    0                         Success
+
+  DESCRIPTION
+    Add and copy if needed a number of partitions, during this operation
+    no other operation is ongoing in the server. This is used by
+    ADD PARTITION all types as well as by REORGANIZE PARTITION. For
+    one-phased implementations it is used also by DROP and COALESCE
+    PARTITIONs.
+    One-phased implementation needs the new frm file, other handlers will
+    get zero length and a NULL reference here.
+*/
+
+int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
+                                    const char *path,
+                                    ulonglong *copied,
+                                    ulonglong *deleted,
+                                    const void *pack_frm_data
+                                    __attribute__((unused)),
+                                    uint pack_frm_len
+                                    __attribute__((unused)))
+{
+  List_iterator<partition_element> part_it(m_part_info->partitions);
+  List_iterator <partition_element> t_it(m_part_info->temp_partitions);
+  char part_name_buff[FN_REFLEN];
+  uint no_parts= m_part_info->partitions.elements;
+  uint no_subparts= m_part_info->no_subparts;
+  uint i= 0;
+  uint no_remain_partitions, part_count;
+  handler **new_file_array;
   int error= 1;
-  DBUG_ENTER("ha_partition::drop_partitions()");
+  bool first;
+  bool copy_parts= FALSE;
+  uint temp_partitions= m_part_info->temp_partitions.elements;
+  THD *thd= current_thd;
+  DBUG_ENTER("ha_partition::change_partitions");
+
+  m_reorged_parts= 0;
+  if (!is_sub_partitioned(m_part_info))
+    no_subparts= 1;
+
+  /*
+    Step 1:
+      Calculate number of reorganised partitions and allocate space for
+      their handler references.
+  */
+  if (temp_partitions)
+  {
+    m_reorged_parts= temp_partitions * no_subparts;
+  }
+  else
+  {
+    do
+    {
+      partition_element *part_elem= part_it++;
+      if (part_elem->part_state == PART_CHANGED ||
+          part_elem->part_state == PART_REORGED_DROPPED)
+      {
+        m_reorged_parts+= no_subparts;
+      }
+    } while (++i < no_parts);
+  }
+  if (m_reorged_parts &&
+      !(m_reorged_file= (handler**)sql_calloc(sizeof(partition_element*)*
+                                              (m_reorged_parts + 1))))
+  {
+    mem_alloc_error(sizeof(partition_element*)*(m_reorged_parts+1));
+    DBUG_RETURN(TRUE);
+  }
 
+  /*
+    Step 2:
+      Calculate number of partitions after change and allocate space for
+      their handler references.
+  */
+  no_remain_partitions= 0;
+  if (temp_partitions)
+  {
+    no_remain_partitions= no_parts * no_subparts;
+  }
+  else
+  {
+    part_it.rewind();
+    i= 0;
+    do
+    {
+      partition_element *part_elem= part_it++;
+      if (part_elem->part_state == PART_NORMAL ||
+          part_elem->part_state == PART_TO_BE_ADDED ||
+          part_elem->part_state == PART_CHANGED)
+      {
+        no_remain_partitions+= no_subparts;
+      }
+    } while (++i < no_parts);
+  }
+  if (!(new_file_array= (handler**)sql_calloc(sizeof(handler*)*
+                                              (2*(no_remain_partitions + 1)))))
+  {
+    mem_alloc_error(sizeof(handler*)*2*(no_remain_partitions+1));
+    DBUG_RETURN(TRUE);
+  }
+  m_added_file= &new_file_array[no_remain_partitions + 1];
+
+  /*
+    Step 3:
+      Fill m_reorged_file with handler references and NULL at the end
+  */
+  if (m_reorged_parts)
+  {
+    i= 0;
+    part_count= 0;
+    first= TRUE;
+    part_it.rewind();
+    do
+    {
+      partition_element *part_elem= part_it++;
+      if (part_elem->part_state == PART_CHANGED ||
+          part_elem->part_state == PART_REORGED_DROPPED)
+      {
+        memcpy((void*)&m_reorged_file[part_count],
+               (void*)&m_file[i*no_subparts],
+               sizeof(handler*)*no_subparts);
+        part_count+= no_subparts;
+      }
+      else if (first && temp_partitions &&
+               part_elem->part_state == PART_TO_BE_ADDED)
+      {
+        /*
+          When doing an ALTER TABLE REORGANIZE PARTITION a number of
+          partitions is to be reorganised into a set of new partitions.
+          The reorganised partitions are in this case in the temp_partitions
+          list. We copy all of them in one batch and thus we only do this
+          until we find the first partition with state PART_TO_BE_ADDED
+          since this is where the new partitions go in and where the old
+          ones used to be.
+        */
+        first= FALSE;
+        memcpy((void*)m_reorged_file, &m_file[i*no_subparts],
+               sizeof(handler*)*m_reorged_parts*no_subparts);
+      }
+    } while (++i < no_parts);
+  }
+
+  /*
+    Step 4:
+      Fill new_array_file with handler references. Create the handlers if
+      needed.
+  */
+  i= 0;
+  part_count= 0;
+  part_it.rewind();
   do
   {
     partition_element *part_elem= part_it++;
-    if (part_elem->part_state == PART_IS_DROPPED)
+    if (part_elem->part_state == PART_NORMAL)
+    {
+      memcpy((void*)&new_file_array[part_count], (void*)&m_file[i],
+             sizeof(handler*)*no_subparts);
+      part_count+= no_subparts;
+    }
+    else if (part_elem->part_state == PART_CHANGED ||
+             part_elem->part_state == PART_TO_BE_ADDED)
+    {
+      uint j= 0;
+      do
+      {
+        if (!(new_file_array[part_count++]= get_new_handler(table->s,
+                                            thd->mem_root,
+                                            part_elem->engine_type)))
+        {
+          mem_alloc_error(sizeof(handler));
+          DBUG_RETURN(TRUE);
+        }
+      } while (++j < no_subparts);
+    }
+  } while (++i < no_parts);
+
+  /*
+    Step 5:
+      Create the new partitions and also open, lock and call external_lock
+      on them to prepare them for copy phase and also for later close
+      calls
+  */
+  i= 0;
+  part_count= 0;
+  part_it.rewind();
+  do
+  {
+    partition_element *part_elem= part_it++;
+    if (part_elem->part_state == PART_TO_BE_ADDED ||
+        part_elem->part_state == PART_CHANGED)
     {
       /*
-        This part is to be dropped, meaning the part or all its subparts.
+        A new partition needs to be created PART_TO_BE_ADDED means an
+        entirely new partition and PART_CHANGED means a changed partition
+        that will still exist with either more or less data in it.
       */
+      uint name_variant= NORMAL_PART_NAME;
+      if (part_elem->part_state == PART_CHANGED ||
+          (part_elem->part_state == PART_TO_BE_ADDED && temp_partitions))
+        name_variant= TEMP_PART_NAME;
       if (is_sub_partitioned(m_part_info))
       {
         List_iterator<partition_element> sub_it(part_elem->subpartitions);
@@ -435,44 +1462,204 @@
           partition_element *sub_elem= sub_it++;
           create_subpartition_name(part_name_buff, path,
                                    part_elem->partition_name,
-                                   sub_elem->partition_name);
+                                   sub_elem->partition_name,
+                                   name_variant);
           part= i * no_subparts + j;
-          DBUG_PRINT("info", ("Drop subpartition %s", part_name_buff));
-          error= m_file[part]->delete_table((const char *) part_name_buff);
+          DBUG_PRINT("info", ("Add subpartition %s", part_name_buff));
+          if ((error= prepare_new_partition(table, create_info,
+                                            new_file_array[part],
+                                            (const char *)part_name_buff)))
+          {
+            cleanup_new_partition(part_count);
+            DBUG_RETURN(TRUE);
+          }
+          m_added_file[part_count++]= new_file_array[part];
         } while (++j < no_subparts);
       }
       else
       {
         create_partition_name(part_name_buff, path,
-                              part_elem->partition_name);
-        DBUG_PRINT("info", ("Drop partition %s", part_name_buff));
-        error= m_file[i]->delete_table((const char *) part_name_buff);
+                              part_elem->partition_name, name_variant,
+                              TRUE);
+        DBUG_PRINT("info", ("Add partition %s", part_name_buff));
+        if ((error= prepare_new_partition(table, create_info,
+                                          new_file_array[i],
+                                          (const char *)part_name_buff)))
+        {
+          cleanup_new_partition(part_count);
+          DBUG_RETURN(TRUE);
+        }
+        m_added_file[part_count++]= new_file_array[i];
       }
     }
   } while (++i < no_parts);
-  DBUG_RETURN(error);
+
+  /*
+    Step 6:
+      State update to prepare for next write of the frm file.
+  */
+  i= 0;
+  part_it.rewind();
+  do
+  {
+    partition_element *part_elem= part_it++;
+    if (part_elem->part_state == PART_TO_BE_ADDED)
+      part_elem->part_state= PART_IS_ADDED;
+    else if (part_elem->part_state == PART_CHANGED)
+      part_elem->part_state= PART_IS_CHANGED;
+    else if (part_elem->part_state == PART_REORGED_DROPPED)
+      part_elem->part_state= PART_TO_BE_DROPPED;
+  } while (++i < no_parts);
+  for (i= 0; i < temp_partitions; i++)
+  {
+    partition_element *part_elem= t_it++;
+    DBUG_ASSERT(part_elem->part_state == PART_TO_BE_REORGED);
+    part_elem->part_state= PART_TO_BE_DROPPED;
+  }
+  m_new_file= new_file_array;
+  DBUG_RETURN(copy_partitions(copied, deleted));
+}
+
+
+/*
+  Copy partitions as part of ALTER TABLE of partitions
+
+  SYNOPSIS
+    copy_partitions()
+    out:copied                 Number of records copied
+    out:deleted                Number of records deleted
+
+  RETURN VALUE
+    >0                         Error code
+    0                          Success
+
+  DESCRIPTION
+    change_partitions has done all the preparations, now it is time to
+    actually copy the data from the reorganised partitions to the new
+    partitions.
+*/
+
+int ha_partition::copy_partitions(ulonglong *copied, ulonglong *deleted)
+{
+  uint reorg_part= 0;
+  int result= 0;
+  longlong func_value;
+  DBUG_ENTER("ha_partition::copy_partitions");
+
+  while (reorg_part < m_reorged_parts)
+  {
+    handler *file= m_reorged_file[reorg_part];
+    uint32 new_part;
+
+    late_extra_cache(reorg_part);
+    if ((result= file->ha_rnd_init(1)))
+      goto error;
+    while (TRUE)
+    {
+      if ((result= file->rnd_next(m_rec0)))
+      {
+        if (result == HA_ERR_RECORD_DELETED)
+          continue;                              //Probably MyISAM
+        if (result != HA_ERR_END_OF_FILE)
+          goto error;
+        /*
+          End-of-file reached, break out to continue with next partition or
+          end the copy process.
+        */
+        break;
+      }
+      /* Found record to insert into new handler */
+      if (m_part_info->get_partition_id(m_part_info, &new_part,
+                                        &func_value))
+      {
+        /*
+           This record is in the original table but will not be in the new
+           table since it doesn't fit into any partition any longer due to
+           changed partitioning ranges or list values.
+        */
+        (*deleted)++;  /* BUG FIX: was 'deleted++' which moved the pointer, never updating the caller's counter */
+      }
+      else
+      {
+        /* Copy record to new handler */
+        (*copied)++;   /* BUG FIX: was 'copied++' which moved the pointer, never updating the caller's counter */
+        if ((result= m_new_file[new_part]->write_row(m_rec0)))
+          goto error;
+      }
+    }
+    late_extra_no_cache(reorg_part);
+    file->rnd_end();
+    reorg_part++;
+  }
+  DBUG_RETURN(FALSE);
+error:
+  print_error(result, MYF(0));
+  DBUG_RETURN(TRUE);
+}
 }
 
+
+/*
+  Update create info as part of ALTER TABLE
+
+  SYNOPSIS
+    update_create_info()
+    create_info                   Create info from ALTER TABLE
+
+  RETURN VALUE
+    NONE
+
+  DESCRIPTION
+    Method empty so far
+*/
+
 void ha_partition::update_create_info(HA_CREATE_INFO *create_info)
 {
   return;
 }
 
 
+/*
+  Change comments specific to handler
+
+  SYNOPSIS
+    update_table_comment()
+    comment                       Original comment
+
+  RETURN VALUE
+    new comment 
+
+  DESCRIPTION
+    No comment changes so far
+*/
+
 char *ha_partition::update_table_comment(const char *comment)
 {
-  return (char*) comment;                       // Nothing to change
+  return (char*) comment;                       /* Nothing to change */
 }
 
 
 
 /*
-  Common routine to handle delete_table and rename_table.
-  The routine uses the partition handler file to get the
-  names of the partition instances. Both these routines
-  are called after creating the handler without table
-  object and thus the file is needed to discover the
-  names of the partitions and the underlying storage engines.
+  Handle delete, rename and create table
+
+  SYNOPSIS
+    del_ren_cre_table()
+    from                    Full path of old table
+    to                      Full path of new table
+    table_arg               Table object
+    create_info             Create info
+
+  RETURN VALUE
+    >0                      Error
+    0                       Success
+
+  DESCRIPTION
+    Common routine to handle delete_table and rename_table.
+    The routine uses the partition handler file to get the
+    names of the partition instances. Both these routines
+    are called after creating the handler without table
+    object and thus the file is needed to discover the
+    names of the partitions and the underlying storage engines.
 */
 
 uint ha_partition::del_ren_cre_table(const char *from,
@@ -480,7 +1667,8 @@
 				     TABLE *table_arg,
 				     HA_CREATE_INFO *create_info)
 {
-  int save_error= 0, error;
+  int save_error= 0;
+  int error;
   char from_buff[FN_REFLEN], to_buff[FN_REFLEN];
   char *name_buffer_ptr;
   uint i;
@@ -495,10 +1683,12 @@
   i= 0;
   do
   {
-    create_partition_name(from_buff, from, name_buffer_ptr);
+    create_partition_name(from_buff, from, name_buffer_ptr, NORMAL_PART_NAME,
+                          FALSE);
     if (to != NULL)
     {						// Rename branch
-      create_partition_name(to_buff, to, name_buffer_ptr);
+      create_partition_name(to_buff, to, name_buffer_ptr, NORMAL_PART_NAME,
+                            FALSE);
       error= (*file)->rename_table((const char*) from_buff,
 				   (const char*) to_buff);
     }
@@ -517,12 +1707,23 @@
   DBUG_RETURN(save_error);
 }
 
+/*
+  Find partition based on partition id
+
+  SYNOPSIS
+    find_partition_element()
+    part_id                   Partition id of partition looked for
+
+  RETURN VALUE
+    >0                        Reference to partition_element
+    0                         Partition not found
+*/
 
 partition_element *ha_partition::find_partition_element(uint part_id)
 {
   uint i;
   uint curr_part_id= 0;
-  List_iterator_fast < partition_element > part_it(m_part_info->partitions);
+  List_iterator_fast <partition_element> part_it(m_part_info->partitions);
 
   for (i= 0; i < m_part_info->no_parts; i++)
   {
@@ -548,18 +1749,32 @@
 }
 
 
+/*
+   Set up table share object before calling create on underlying handler
+
+   SYNOPSIS
+     set_up_table_before_create()
+     table                       Table object
+     info                        Create info
+     part_id                     Partition id of partition to set-up
+
+   RETURN VALUE
+     NONE
+
+   DESCRIPTION
+     Set up
+     1) Comment on partition
+     2) MAX_ROWS, MIN_ROWS on partition
+     3) Index file name on partition
+     4) Data file name on partition
+*/
+
 void ha_partition::set_up_table_before_create(TABLE *table,
 					      HA_CREATE_INFO *info,
 					      uint part_id)
 {
-  /*
-    Set up
-    1) Comment on partition
-    2) MAX_ROWS, MIN_ROWS on partition
-    3) Index file name on partition
-    4) Data file name on partition
-  */
   partition_element *part_elem= find_partition_element(part_id);
+
   if (!part_elem)
     return;                                     // Fatal error
   table->s->max_rows= part_elem->part_max_rows;
@@ -570,53 +1785,95 @@
 
 
 /*
-  Routine used to add two names with '_' in between then. Service routine
-  to create_handler_file
-  Include the NULL in the count of characters since it is needed as separator
-  between the partition names.
+  Add two names together
+
+  SYNOPSIS
+    name_add()
+    out:dest                          Destination string
+    first_name                        First name
+    sec_name                          Second name
+
+  RETURN VALUE
+    >0                                Error
+    0                                 Success
+
+  DESCRIPTION
+    Routine used to add two names with "#SP#" in between them. Service
+    routine to create_handler_file.
+    Include the NUL terminator in the count of characters since it is
+    needed as a separator between the partition names.
 */
 
 static uint name_add(char *dest, const char *first_name, const char *sec_name)
 {
-  return (uint) (strxmov(dest, first_name, "_", sec_name, NullS) -dest) + 1;
+  return (uint) (strxmov(dest, first_name, "#SP#", sec_name, NullS) -dest) + 1;
 }
 
 
 /*
-  Method used to create handler file with names of partitions, their
-  engine types and the number of partitions.
+  Create the special .par file
+
+  SYNOPSIS
+    create_handler_file()
+    name                      Full path of table name
+
+  RETURN VALUE
+    >0                        Error code
+    0                         Success
+
+  DESCRIPTION
+    Method used to create handler file with names of partitions, their
+    engine types and the number of partitions.
 */
 
 bool ha_partition::create_handler_file(const char *name)
 {
   partition_element *part_elem, *subpart_elem;
   uint i, j, part_name_len, subpart_name_len;
-  uint tot_partition_words, tot_name_len;
+  uint tot_partition_words, tot_name_len, no_parts;
+  uint tot_parts= 0;
   uint tot_len_words, tot_len_byte, chksum, tot_name_words;
   char *name_buffer_ptr;
   uchar *file_buffer, *engine_array;
   bool result= TRUE;
   char file_name[FN_REFLEN];
+  char part_name[FN_REFLEN];
+  char subpart_name[FN_REFLEN];
   File file;
-  List_iterator_fast < partition_element > part_it(m_part_info->partitions);
+  List_iterator_fast <partition_element> part_it(m_part_info->partitions);
   DBUG_ENTER("create_handler_file");
 
-  DBUG_PRINT("info", ("table name = %s", name));
+  no_parts= m_part_info->partitions.elements;
+  DBUG_PRINT("info", ("table name = %s, no_parts = %u", name,
+                      no_parts));
   tot_name_len= 0;
-  for (i= 0; i < m_part_info->no_parts; i++)
+  for (i= 0; i < no_parts; i++)
   {
     part_elem= part_it++;
-    part_name_len= strlen(part_elem->partition_name);
+    if (part_elem->part_state != PART_NORMAL &&
+        part_elem->part_state != PART_IS_ADDED &&
+        part_elem->part_state != PART_IS_CHANGED)
+      continue;
+    tablename_to_filename(part_elem->partition_name, part_name,
+                          FN_REFLEN);
+    part_name_len= strlen(part_name);
     if (!m_is_sub_partitioned)
+    {
       tot_name_len+= part_name_len + 1;
+      tot_parts++;
+    }
     else
     {
-      List_iterator_fast<partition_element> sub_it(part_elem->subpartitions);
+      List_iterator_fast <partition_element> sub_it(part_elem->subpartitions);
       for (j= 0; j < m_part_info->no_subparts; j++)
       {
 	subpart_elem= sub_it++;
-	subpart_name_len= strlen(subpart_elem->partition_name);
-	tot_name_len+= part_name_len + subpart_name_len + 2;
+        tablename_to_filename(subpart_elem->partition_name,
+                              subpart_name,
+                              FN_REFLEN);
+	subpart_name_len= strlen(subpart_name);
+	tot_name_len+= part_name_len + subpart_name_len + 5;
+        tot_parts++;
       }
     }
   }
@@ -633,7 +1890,7 @@
 
      All padding bytes are zeroed
   */
-  tot_partition_words= (m_tot_parts + 3) / 4;
+  tot_partition_words= (tot_parts + 3) / 4;
   tot_name_words= (tot_name_len + 3) / 4;
   tot_len_words= 4 + tot_partition_words + tot_name_words;
   tot_len_byte= 4 * tot_len_words;
@@ -642,25 +1899,34 @@
   engine_array= (file_buffer + 12);
   name_buffer_ptr= (char*) (file_buffer + ((4 + tot_partition_words) * 4));
   part_it.rewind();
-  for (i= 0; i < m_part_info->no_parts; i++)
+  for (i= 0; i < no_parts; i++)
   {
     part_elem= part_it++;
+    if (part_elem->part_state != PART_NORMAL &&
+        part_elem->part_state != PART_IS_ADDED &&
+        part_elem->part_state != PART_IS_CHANGED)
+      continue;
     if (!m_is_sub_partitioned)
     {
-      name_buffer_ptr= strmov(name_buffer_ptr, part_elem->partition_name)+1;
+      tablename_to_filename(part_elem->partition_name, part_name, FN_REFLEN);
+      name_buffer_ptr= strmov(name_buffer_ptr, part_name)+1;
       *engine_array= (uchar) ha_legacy_type(part_elem->engine_type);
       DBUG_PRINT("info", ("engine: %u", *engine_array));
       engine_array++;
     }
     else
     {
-      List_iterator_fast<partition_element> sub_it(part_elem->subpartitions);
+      List_iterator_fast <partition_element> sub_it(part_elem->subpartitions);
       for (j= 0; j < m_part_info->no_subparts; j++)
       {
 	subpart_elem= sub_it++;
+        tablename_to_filename(part_elem->partition_name, part_name,
+                              FN_REFLEN);
+        tablename_to_filename(subpart_elem->partition_name, subpart_name,
+                              FN_REFLEN);
 	name_buffer_ptr+= name_add(name_buffer_ptr,
-				    part_elem->partition_name,
-				    subpart_elem->partition_name);
+				   part_name,
+				   subpart_name);
 	*engine_array= (uchar) ha_legacy_type(part_elem->engine_type);
 	engine_array++;
       }
@@ -668,7 +1934,7 @@
   }
   chksum= 0;
   int4store(file_buffer, tot_len_words);
-  int4store(file_buffer + 8, m_tot_parts);
+  int4store(file_buffer + 8, tot_parts);
   int4store(file_buffer + 12 + (tot_partition_words * 4), tot_name_len);
   for (i= 0; i < tot_len_words; i++)
     chksum^= uint4korr(file_buffer + 4 * i);
@@ -692,6 +1958,15 @@
   DBUG_RETURN(result);
 }
 
+/*
+  Clear handler variables and free some memory
+
+  SYNOPSIS
+    clear_handler_file()
+
+  RETURN VALUE 
+    NONE
+*/
 
 void ha_partition::clear_handler_file()
 {
@@ -702,6 +1977,16 @@
   m_engine_array= NULL;
 }
 
+/*
+  Create underlying handler objects
+
+  SYNOPSIS
+    create_handlers()
+
+  RETURN VALUE
+    TRUE                  Error
+    FALSE                 Success
+*/
 
 bool ha_partition::create_handlers()
 {
@@ -735,10 +2020,20 @@
   DBUG_RETURN(FALSE);
 }
 
+/*
+  Create underlying handler objects from partition info
+
+  SYNOPSIS
+    new_handlers_from_part_info()
+
+  RETURN VALUE
+    TRUE                  Error
+    FALSE                 Success
+*/
 
 bool ha_partition::new_handlers_from_part_info()
 {
-  uint i, j;
+  uint i, j, part_count;
   partition_element *part_elem;
   uint alloc_len= (m_tot_parts + 1) * sizeof(handler*);
   List_iterator_fast <partition_element> part_it(m_part_info->partitions);
@@ -746,11 +2041,15 @@
   DBUG_ENTER("ha_partition::new_handlers_from_part_info");
 
   if (!(m_file= (handler **) sql_alloc(alloc_len)))
-    goto error;
+  {
+    mem_alloc_error(alloc_len);
+    goto error_end;
+  }
   bzero(m_file, alloc_len);
   DBUG_ASSERT(m_part_info->no_parts > 0);
 
   i= 0;
+  part_count= 0;
   /*
     Don't know the size of the underlying storage engine, invent a number of
     bytes allocated for error message if allocation fails
@@ -759,10 +2058,6 @@
   do
   {
     part_elem= part_it++;
-    if (!(m_file[i]= get_new_handler(table_share, thd->mem_root,
-                                     part_elem->engine_type)))
-      goto error;
-    DBUG_PRINT("info", ("engine_type: %u", (uint) ha_legacy_type(part_elem->engine_type)));
     if (m_is_sub_partitioned)
     {
       for (j= 0; j < m_part_info->no_subparts; j++)
@@ -770,9 +2065,18 @@
 	if (!(m_file[i]= get_new_handler(table_share, thd->mem_root,
                                          part_elem->engine_type)))
           goto error;
-	DBUG_PRINT("info", ("engine_type: %u", (uint) ha_legacy_type(part_elem->engine_type)));
+	DBUG_PRINT("info", ("engine_type: %u",
+                   (uint) ha_legacy_type(part_elem->engine_type)));
       }
     }
+    else
+    {
+      if (!(m_file[part_count++]= get_new_handler(table_share, thd->mem_root,
+                                                  part_elem->engine_type)))
+        goto error;
+      DBUG_PRINT("info", ("engine_type: %u",
+                 (uint) ha_legacy_type(part_elem->engine_type)));
+    }
   } while (++i < m_part_info->no_parts);
   if (part_elem->engine_type == &myisam_hton)
   {
@@ -781,14 +2085,26 @@
   }
   DBUG_RETURN(FALSE);
 error:
-  my_error(ER_OUTOFMEMORY, MYF(0), alloc_len);
+  mem_alloc_error(sizeof(handler));
+error_end:
   DBUG_RETURN(TRUE);
 }
 
 
 /*
-  Open handler file to get partition names, engine types and number of
-  partitions.
+  Get info about partition engines and their names from the .par file
+
+  SYNOPSIS
+    get_from_handler_file()
+    name                        Full path of table name
+
+  RETURN VALUE
+    TRUE                        Error
+    FALSE                       Success
+
+  DESCRIPTION
+    Open handler file to get partition names, engine types and number of
+    partitions.
 */
 
 bool ha_partition::get_from_handler_file(const char *name)
@@ -824,6 +2140,7 @@
   if (chksum)
     goto err2;
   m_tot_parts= uint4korr((file_buffer) + 8);
+  DBUG_PRINT("info", ("No of parts = %u", m_tot_parts));
   tot_partition_words= (m_tot_parts + 3) / 4;
   if (!(engine_array= (handlerton **) my_malloc(m_tot_parts * sizeof(handlerton*),MYF(0))))
     goto err2;
@@ -858,13 +2175,26 @@
                 MODULE open/close object
 ****************************************************************************/
 /*
-  Used for opening tables. The name will be the name of the file.
-  A table is opened when it needs to be opened. For instance
-  when a request comes in for a select on the table (tables are not
-  open and closed for each request, they are cached).
+  Open handler object
+
+  SYNOPSIS
+    open()
+    name                  Full path of table name
+    mode                  Open mode flags
+    test_if_locked        Lock flags, passed on to each underlying handler's ha_open()
+
+  RETURN VALUE
+    >0                    Error
+    0                     Success
+
+  DESCRIPTION
+    Used for opening tables. The name will be the name of the file.
+    A table is opened when it needs to be opened. For instance
+    when a request comes in for a select on the table (tables are not
+    open and closed for each request, they are cached).
 
-  Called from handler.cc by handler::ha_open(). The server opens all tables
-  by calling ha_open() which then calls the handler specific open().
+    Called from handler.cc by handler::ha_open(). The server opens all tables
+    by calling ha_open() which then calls the handler specific open().
 */
 
 int ha_partition::open(const char *name, int mode, uint test_if_locked)
@@ -877,6 +2207,8 @@
   DBUG_ENTER("ha_partition::open");
 
   ref_length= 0;
+  m_mode= mode;
+  m_open_test_lock= test_if_locked;
   m_part_field_array= m_part_info->full_part_field_array;
   if (get_from_handler_file(name))
     DBUG_RETURN(1);
@@ -923,7 +2255,8 @@
   file= m_file;
   do
   {
-    create_partition_name(name_buff, name, name_buffer_ptr);
+    create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME,
+                          FALSE);
     if ((error= (*file)->ha_open(table, (const char*) name_buff, mode,
                                  test_if_locked)))
       goto err_handler;
@@ -946,7 +2279,7 @@
   /*
     Initialise priority queue, initialised to reading forward.
   */
-  if ((error= init_queue(&queue, m_tot_parts, (uint) PARTITION_BYTES_IN_POS,
+  if ((error= init_queue(&m_queue, m_tot_parts, (uint) PARTITION_BYTES_IN_POS,
                          0, key_rec_cmp, (void*)this)))
     goto err_handler;
 
@@ -967,15 +2300,23 @@
   DBUG_RETURN(error);
 }
 
+
 /*
-  Closes a table. We call the free_share() function to free any resources
-  that we have allocated in the "shared" structure.
+  Close handler object
+
+  SYNOPSIS
+    close()
 
-  Called from sql_base.cc, sql_select.cc, and table.cc.
-  In sql_select.cc it is only used to close up temporary tables or during
-  the process where a temporary table is converted over to being a
-  myisam table.
-  For sql_base.cc look at close_data_tables().
+  RETURN VALUE
+    >0                   Error code
+    0                    Success
+
+  DESCRIPTION
+    Called from sql_base.cc, sql_select.cc, and table.cc.
+    In sql_select.cc it is only used to close up temporary tables or during
+    the process where a temporary table is converted over to being a
+    myisam table.
+    For sql_base.cc look at close_data_tables().
 */
 
 int ha_partition::close(void)
@@ -1007,27 +2348,40 @@
 */
 
 /*
-  First you should go read the section "locking functions for mysql" in
-  lock.cc to understand this.
-  This create a lock on the table. If you are implementing a storage engine
-  that can handle transactions look at ha_berkely.cc to see how you will
-  want to goo about doing this. Otherwise you should consider calling
-  flock() here.
-  Originally this method was used to set locks on file level to enable
-  several MySQL Servers to work on the same data. For transactional
-  engines it has been "abused" to also mean start and end of statements
-  to enable proper rollback of statements and transactions. When LOCK
-  TABLES has been issued the start_stmt method takes over the role of
-  indicating start of statement but in this case there is no end of
-  statement indicator(?).
+  Set external locks on table
+
+  SYNOPSIS
+    external_lock()
+    thd                    Thread object
+    lock_type              Type of external lock
+
+  RETURN VALUE
+    >0                   Error code
+    0                    Success
+
+  DESCRIPTION
+    First you should go read the section "locking functions for mysql" in
+    lock.cc to understand this.
+    This create a lock on the table. If you are implementing a storage engine
+    that can handle transactions look at ha_berkeley.cc to see how you will
+    want to go about doing this. Otherwise you should consider calling
+    flock() here.
+    Originally this method was used to set locks on file level to enable
+    several MySQL Servers to work on the same data. For transactional
+    engines it has been "abused" to also mean start and end of statements
+    to enable proper rollback of statements and transactions. When LOCK
+    TABLES has been issued the start_stmt method takes over the role of
+    indicating start of statement but in this case there is no end of
+    statement indicator(?).
 
-  Called from lock.cc by lock_external() and unlock_external(). Also called
-  from sql_table.cc by copy_data_between_tables().
+    Called from lock.cc by lock_external() and unlock_external(). Also called
+    from sql_table.cc by copy_data_between_tables().
 */
 
 int ha_partition::external_lock(THD *thd, int lock_type)
 {
   uint error;
+  handler **file;
   DBUG_ENTER("ha_partition::external_lock");
   DBUG_PRINT("info", ("m_part_info->used_partitions %lx \
                       m_part_info->used_partitions_copy %lx",
@@ -1079,36 +2433,49 @@
 
 
 /*
-  The idea with handler::store_lock() is the following:
+  Get the lock(s) for the table and perform conversion of locks if needed
 
-  The statement decided which locks we should need for the table
-  for updates/deletes/inserts we get WRITE locks, for SELECT... we get
-  read locks.
-
-  Before adding the lock into the table lock handler (see thr_lock.c)
-  mysqld calls store lock with the requested locks.  Store lock can now
-  modify a write lock to a read lock (or some other lock), ignore the
-  lock (if we don't want to use MySQL table locks at all) or add locks
-  for many tables (like we do when we are using a MERGE handler).
-
-  Berkeley DB for partition  changes all WRITE locks to TL_WRITE_ALLOW_WRITE
-  (which signals that we are doing WRITES, but we are still allowing other
-  reader's and writer's.
-
-  When releasing locks, store_lock() are also called. In this case one
-  usually doesn't have to do anything.
-
-  store_lock is called when holding a global mutex to ensure that only
-  one thread at a time changes the locking information of tables.
-
-  In some exceptional cases MySQL may send a request for a TL_IGNORE;
-  This means that we are requesting the same lock as last time and this
-  should also be ignored. (This may happen when someone does a flush
-  table when we have opened a part of the tables, in which case mysqld
-  closes and reopens the tables and tries to get the same locks at last
-  time).  In the future we will probably try to remove this.
+  SYNOPSIS
+    store_lock()
+    thd                   Thread object
+    to                    Lock object array
+    lock_type             Table lock type
+
+  RETURN VALUE
+    >0                   Error code
+    0                    Success
+
+  DESCRIPTION
+    The idea with handler::store_lock() is the following:
+
+    The statement decided which locks we should need for the table
+    for updates/deletes/inserts we get WRITE locks, for SELECT... we get
+    read locks.
+
+    Before adding the lock into the table lock handler (see thr_lock.c)
+    mysqld calls store lock with the requested locks.  Store lock can now
+    modify a write lock to a read lock (or some other lock), ignore the
+    lock (if we don't want to use MySQL table locks at all) or add locks
+    for many tables (like we do when we are using a MERGE handler).
+
+    Berkeley DB for partition changes all WRITE locks to TL_WRITE_ALLOW_WRITE
+    (which signals that we are doing WRITES, but we are still allowing other
+    readers and writers).
+
+    When releasing locks, store_lock() is also called. In this case one
+    usually doesn't have to do anything.
+
+    store_lock is called when holding a global mutex to ensure that only
+    one thread at a time changes the locking information of tables.
+
+    In some exceptional cases MySQL may send a request for a TL_IGNORE;
+    This means that we are requesting the same lock as last time and this
+    should also be ignored. (This may happen when someone does a flush
+    table when we have opened a part of the tables, in which case mysqld
+    closes and reopens the tables and tries to get the same locks as last
+    time).  In the future we will probably try to remove this.
 
-  Called from lock.cc by get_lock_data().
+    Called from lock.cc by get_lock_data().
 */
 
 THR_LOCK_DATA **ha_partition::store_lock(THD *thd,
@@ -1130,6 +2497,23 @@
   DBUG_RETURN(to);
 }
 
+/*
+  Start a statement when table is locked
+
+  SYNOPSIS
+    start_stmt()
+    thd                  Thread object
+    lock_type            Type of external lock
+
+  RETURN VALUE
+    >0                   Error code
+    0                    Success
+
+  DESCRIPTION
+    This method is called instead of external lock when the table is locked
+    before the statement is executed.
+*/
+
 int ha_partition::start_stmt(THD *thd, thr_lock_type lock_type)
 {
   int i= 0;
@@ -1172,10 +2556,19 @@
 
 
 /*
-  Returns the number of store locks needed in call to store lock.
-  We return number of partitions since we call store_lock on each
-  underlying handler. Assists the above functions in allocating
-  sufficient space for lock structures.
+  Get number of lock objects returned in store_lock
+
+  SYNOPSIS
+    lock_count()
+
+  RETURN VALUE
+    Number of locks returned in call to store_lock
+
+  DESCRIPTION
+    Returns the number of store locks needed in call to store lock.
+    We return number of partitions since we call store_lock on each
+    underlying handler. Assists the above functions in allocating
+    sufficient space for lock structures.
 */
 
 uint ha_partition::lock_count()
@@ -1201,8 +2594,17 @@
 
 
 /*
-  Record currently processed was not in the result set of the statement
-  and is thus unlocked. Used for UPDATE and DELETE queries.
+  Unlock last accessed row
+
+  SYNOPSIS
+    unlock_row()
+
+  RETURN VALUE
+    NONE
+
+  DESCRIPTION
+    Record currently processed was not in the result set of the statement
+    and is thus unlocked. Used for UPDATE and DELETE queries.
 */
 
 void ha_partition::unlock_row()
@@ -1217,43 +2619,56 @@
 ****************************************************************************/
 
 /*
-  write_row() inserts a row. buf() is a byte array of data, normally record[0].
-
-  You can use the field information to extract the data from the native byte
-  array type.
+  Insert a row to the table
 
-  Example of this would be:
-  for (Field **field=table->field ; *field ; field++)
-  {
-    ...
-  }
-
-  See ha_tina.cc for an partition of extracting all of the data as strings.
-  ha_berekly.cc has an partition of how to store it intact by "packing" it
-  for ha_berkeley's own native storage type.
-
-  See the note for update_row() on auto_increments and timestamps. This
-  case also applied to write_row().
+  SYNOPSIS
+    write_row()
+    buf                        The row in MySQL Row Format
 
-  Called from item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc,
-  sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc, and sql_update.cc.
+  RETURN VALUE
+    >0                         Error code
+    0                          Success
+
+  DESCRIPTION
+    write_row() inserts a row. buf() is a byte array of data, normally
+    record[0].
 
-  ADDITIONAL INFO:
+    You can use the field information to extract the data from the native byte
+    array type.
 
-  Most handlers set timestamp when calling write row if any such fields
-  exists. Since we are calling an underlying handler we assume the
-  underlying handler will assume this responsibility.
+    Example of this would be:
+    for (Field **field=table->field ; *field ; field++)
+    {
+      ...
+    }
 
-  Underlying handlers will also call update_auto_increment to calculate
-  the new auto increment value. We will catch the call to
-  get_auto_increment and ensure this increment value is maintained by
-  only one of the underlying handlers.
+    See ha_tina.cc for an example of extracting all of the data as strings.
+    ha_berkeley.cc has an example of how to store it intact by "packing" it
+    for ha_berkeley's own native storage type.
+
+    See the note for update_row() on auto_increments and timestamps. This
+    case also applied to write_row().
+
+    Called from item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc,
+    sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc, and sql_update.cc.
+
+    ADDITIONAL INFO:
+
+    Most handlers set timestamp when calling write row if any such fields
+    exist. Since we are calling an underlying handler we assume the
+    underlying handler will assume this responsibility.
+
+    Underlying handlers will also call update_auto_increment to calculate
+    the new auto increment value. We will catch the call to
+    get_auto_increment and ensure this increment value is maintained by
+    only one of the underlying handlers.
 */
 
 int ha_partition::write_row(byte * buf)
 {
   uint32 part_id;
   int error;
+  longlong func_value;
 #ifdef NOT_NEEDED
   byte *rec0= m_rec0;
 #endif
@@ -1263,17 +2678,19 @@
 #ifdef NOT_NEEDED
   if (likely(buf == rec0))
 #endif
-    error= m_part_info->get_partition_id(m_part_info, &part_id);
+    error= m_part_info->get_partition_id(m_part_info, &part_id,
+                                         &func_value);
 #ifdef NOT_NEEDED
   else
   {
     set_field_ptr(m_part_field_array, buf, rec0);
-    error= m_part_info->get_partition_id(m_part_info, &part_id);
+    error= m_part_info->get_partition_id(m_part_info, &part_id,
+                                         &func_value);
     set_field_ptr(m_part_field_array, rec0, buf);
   }
 #endif
   if (unlikely(error))
-    DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);
+    DBUG_RETURN(error);
   m_last_part= part_id;
   DBUG_PRINT("info", ("Insert in partition %d", part_id));
   DBUG_RETURN(m_file[part_id]->write_row(buf));
@@ -1281,33 +2698,46 @@
 
 
 /*
-  Yes, update_row() does what you expect, it updates a row. old_data will
-  have the previous row record in it, while new_data will have the newest
-  data in it.
-  Keep in mind that the server can do updates based on ordering if an
-  ORDER BY clause was used. Consecutive ordering is not guarenteed.
-
-  Currently new_data will not have an updated auto_increament record, or
-  and updated timestamp field. You can do these for partition by doing these:
-  if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
-    table->timestamp_field->set_time();
-  if (table->next_number_field && record == table->record[0])
-    update_auto_increment();
-
-  Called from sql_select.cc, sql_acl.cc, sql_update.cc, and sql_insert.cc.
-  new_data is always record[0]
-  old_data is normally record[1] but may be anything
+  Update an existing row
 
+  SYNOPSIS
+    update_row()
+    old_data                 Old record in MySQL Row Format
+    new_data                 New record in MySQL Row Format
+
+  RETURN VALUE
+    >0                         Error code
+    0                          Success
+
+  DESCRIPTION
+    Yes, update_row() does what you expect, it updates a row. old_data will
+    have the previous row record in it, while new_data will have the newest
+    data in it.
+    Keep in mind that the server can do updates based on ordering if an
+    ORDER BY clause was used. Consecutive ordering is not guaranteed.
+
+    Currently new_data will not have an updated auto_increment record, or
+    an updated timestamp field. You can do these for partition by doing these:
+    if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
+      table->timestamp_field->set_time();
+    if (table->next_number_field && record == table->record[0])
+      update_auto_increment();
+
+    Called from sql_select.cc, sql_acl.cc, sql_update.cc, and sql_insert.cc.
+    new_data is always record[0]
+    old_data is normally record[1] but may be anything
 */
 
 int ha_partition::update_row(const byte *old_data, byte *new_data)
 {
   uint32 new_part_id, old_part_id;
   int error;
+  longlong func_value;
   DBUG_ENTER("ha_partition::update_row");
 
   if ((error= get_parts_for_update(old_data, new_data, table->record[0],
-                                  m_part_info, &old_part_id, &new_part_id)))
+                                   m_part_info, &old_part_id, &new_part_id,
+                                   &func_value)))
   {
     DBUG_RETURN(error);
   }
@@ -1342,21 +2772,31 @@
 
 
 /*
-  This will delete a row. buf will contain a copy of the row to be deleted.
-  The server will call this right after the current row has been read
-  (from either a previous rnd_xxx() or index_xxx() call).
-  If you keep a pointer to the last row or can access a primary key it will
-  make doing the deletion quite a bit easier.
-  Keep in mind that the server does no guarentee consecutive deletions.
-  ORDER BY clauses can be used.
-
-  Called in sql_acl.cc and sql_udf.cc to manage internal table information.
-  Called in sql_delete.cc, sql_insert.cc, and sql_select.cc. In sql_select
-  it is used for removing duplicates while in insert it is used for REPLACE
-  calls.
+  Remove an existing row
+
+  SYNOPSIS
+    delete_row()
+    buf                      Deleted row in MySQL Row Format
 
-  buf is either record[0] or record[1]
+  RETURN VALUE
+    >0                       Error Code
+    0                        Success
+
+  DESCRIPTION
+    This will delete a row. buf will contain a copy of the row to be deleted.
+    The server will call this right after the current row has been read
+    (from either a previous rnd_xxx() or index_xxx() call).
+    If you keep a pointer to the last row or can access a primary key it will
+    make doing the deletion quite a bit easier.
+    Keep in mind that the server does not guarantee consecutive deletions.
+    ORDER BY clauses can be used.
+
+    Called in sql_acl.cc and sql_udf.cc to manage internal table information.
+    Called in sql_delete.cc, sql_insert.cc, and sql_select.cc. In sql_select
+    it is used for removing duplicates while in insert it is used for REPLACE
+    calls.
 
+    buf is either record[0] or record[1]
 */
 
 int ha_partition::delete_row(const byte *buf)
@@ -1375,15 +2815,25 @@
 
 
 /*
-  Used to delete all rows in a table. Both for cases of truncate and
-  for cases where the optimizer realizes that all rows will be
-  removed as a result of a SQL statement.
-
-  Called from item_sum.cc by Item_func_group_concat::clear(),
-  Item_sum_count_distinct::clear(), and Item_func_group_concat::clear().
-  Called from sql_delete.cc by mysql_delete().
-  Called from sql_select.cc by JOIN::reinit().
-  Called from sql_union.cc by st_select_lex_unit::exec().
+  Delete all rows in a table
+
+  SYNOPSIS
+    delete_all_rows()
+
+  RETURN VALUE
+    >0                       Error Code
+    0                        Success
+
+  DESCRIPTION
+    Used to delete all rows in a table. Both for cases of truncate and
+    for cases where the optimizer realizes that all rows will be
+    removed as a result of a SQL statement.
+
+    Called from item_sum.cc by Item_func_group_concat::clear(),
+    Item_sum_count_distinct::clear(), and Item_func_group_concat::clear().
+    Called from sql_delete.cc by mysql_delete().
+    Called from sql_select.cc by JOIN::reinit().
+    Called from sql_union.cc by st_select_lex_unit::exec().
 */
 
 int ha_partition::delete_all_rows()
@@ -1391,6 +2841,7 @@
   int error;
   handler **file;
   DBUG_ENTER("ha_partition::delete_all_rows");
+
   file= m_file;
   do
   {
@@ -1401,14 +2852,26 @@
   DBUG_RETURN(0);
 }
 
+
 /*
-  rows == 0 means we will probably insert many rows
+  Start a large batch of insert rows
+
+  SYNOPSIS
+    start_bulk_insert()
+    rows                  Number of rows to insert
+
+  RETURN VALUE
+    NONE
+
+  DESCRIPTION
+    rows == 0 means we will probably insert many rows
 */
 
 void ha_partition::start_bulk_insert(ha_rows rows)
 {
   handler **file;
   DBUG_ENTER("ha_partition::start_bulk_insert");
+
   if (!rows)
   {
     /* Avoid allocation big caches in all underlaying handlers */
@@ -1425,6 +2888,17 @@
 }
 
 
+/*
+  Finish a large batch of insert rows
+
+  SYNOPSIS
+    end_bulk_insert()
+
+  RETURN VALUE
+    >0                      Error code
+    0                       Success
+*/
+
 int ha_partition::end_bulk_insert()
 {
   int error= 0;
@@ -1443,6 +2917,7 @@
   DBUG_RETURN(error);
 }
 
+
 /****************************************************************************
                 MODULE full table scan
 ****************************************************************************/
@@ -1454,18 +2929,22 @@
     scan	0  Initialize for random reads through rnd_pos()
 		1  Initialize for random scan through rnd_next()
 
-  NOTES
-  rnd_init() is called when the server wants the storage engine to do a
-  table scan or when the server wants to access data through rnd_pos.
-
-  When scan is used we will scan one handler partition at a time.
-  When preparing for rnd_pos we will init all handler partitions.
-  No extra cache handling is needed when scannning is not performed.
-
-  Before initialising we will call rnd_end to ensure that we clean up from
-  any previous incarnation of a table scan.
-  Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc,
-  sql_table.cc, and sql_update.cc.
+  RETURN VALUE
+    >0          Error code
+    0           Success
+
+  DESCRIPTION 
+    rnd_init() is called when the server wants the storage engine to do a
+    table scan or when the server wants to access data through rnd_pos.
+
+    When scan is used we will scan one handler partition at a time.
+    When preparing for rnd_pos we will init all handler partitions.
+    No extra cache handling is needed when scanning is not performed.
+
+    Before initialising we will call rnd_end to ensure that we clean up from
+    any previous incarnation of a table scan.
+    Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc,
+    sql_table.cc, and sql_update.cc.
 */
 
 int ha_partition::rnd_init(bool scan)
@@ -1531,6 +3010,17 @@
 }
 
 
+/*
+  End of a table scan
+
+  SYNOPSIS
+    rnd_end()
+
+  RETURN VALUE
+    >0          Error code
+    0           Success
+*/
+
 int ha_partition::rnd_end()
 {
   int i= 0;
@@ -1538,24 +3028,24 @@
   DBUG_ENTER("ha_partition::rnd_end");
   switch (m_scan_value) 
   {
-    case 2:                                       // Error
-      break;
-    case 1:
-      if (MY_BIT_NONE != current_partition_index)         // Table scan
-      {
-        late_extra_no_cache(current_partition_index);
-        m_file[current_partition_index]->ha_rnd_end();
-      }
-      break;
-    case 0:
-      int i=0;
-      file= m_file;
-      do
-      {
-        if (_bitmap_is_set(&(m_part_info->used_partitions), (file - m_file)))
-          (*file)->ha_rnd_end();
-      } while (*(++file));
-      break;
+  case 2:                                       // Error
+    break;
+  case 1:
+    if (MY_BIT_NONE != current_partition_index)         // Table scan
+    {
+      late_extra_no_cache(current_partition_index);
+      m_file[current_partition_index]->ha_rnd_end();
+    }
+    break;
+  case 0:
+    int i=0;
+    file= m_file;
+    do
+    {
+      if (_bitmap_is_set(&(m_part_info->used_partitions), (file - m_file)))
+        (*file)->ha_rnd_end();
+    } while (*(++file));
+    break;
   }
   m_scan_value= 2;
   DBUG_RETURN(0);
@@ -1568,18 +3058,22 @@
     rnd_next()
     buf		buffer that should be filled with data
 
-  This is called for each row of the table scan. When you run out of records
-  you should return HA_ERR_END_OF_FILE.
-  The Field structure for the table is the key to getting data into buf
-  in a manner that will allow the server to understand it.
+  RETURN VALUE
+    >0          Error code
+    0           Success
+
+  DESCRIPTION
+    This is called for each row of the table scan. When you run out of records
+    you should return HA_ERR_END_OF_FILE.
+    The Field structure for the table is the key to getting data into buf
+    in a manner that will allow the server to understand it.
 
-  Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc,
-  sql_table.cc, and sql_update.cc.
+    Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc,
+    sql_table.cc, and sql_update.cc.
 */
 
 int ha_partition::rnd_next(byte *buf)
 {
-  DBUG_ASSERT(m_scan_value);
   handler *file;
   int result= HA_ERR_END_OF_FILE;
   DBUG_ENTER("ha_partition::rnd_next");
@@ -1645,35 +3139,36 @@
 }
 
 
-inline void store_part_id_in_pos(byte *pos, uint part_id)
-{
-  int2store(pos, part_id);
-}
+/*
+  Save position of current row
 
-inline uint get_part_id_from_pos(const byte *pos)
-{
-  return uint2korr(pos);
-}
+  SYNOPSIS
+    position()
+    record             Current record in MySQL Row Format
 
-/*
-  position() is called after each call to rnd_next() if the data needs
-  to be ordered. You can do something like the following to store
-  the position:
-  ha_store_ptr(ref, ref_length, current_position);
+  RETURN VALUE
+    NONE
 
-  The server uses ref to store data. ref_length in the above case is
-  the size needed to store current_position. ref is just a byte array
-  that the server will maintain. If you are using offsets to mark rows, then
-  current_position should be the offset. If it is a primary key like in
-  BDB, then it needs to be a primary key.
+  DESCRIPTION
+    position() is called after each call to rnd_next() if the data needs
+    to be ordered. You can do something like the following to store
+    the position:
+    ha_store_ptr(ref, ref_length, current_position);
+
+    The server uses ref to store data. ref_length in the above case is
+    the size needed to store current_position. ref is just a byte array
+    that the server will maintain. If you are using offsets to mark rows, then
+    current_position should be the offset. If it is a primary key like in
+    BDB, then it needs to be a primary key.
 
-  Called from filesort.cc, sql_select.cc, sql_delete.cc and sql_update.cc.
+    Called from filesort.cc, sql_select.cc, sql_delete.cc and sql_update.cc.
 */
 
 void ha_partition::position(const byte *record)
 {
   handler *file= m_file[current_partition_index];
   DBUG_ENTER("ha_partition::position");
+
   file->position(record);
   store_part_id_in_pos(ref, current_partition_index);
   memcpy((ref + PARTITION_BYTES_IN_POS), file->ref,
@@ -1688,12 +3183,24 @@
 }
 
 /*
-  This is like rnd_next, but you are given a position to use
-  to determine the row. The position will be of the type that you stored in
-  ref. You can use ha_get_ptr(pos,ref_length) to retrieve whatever key
-  or position you saved when position() was called.
-  Called from filesort.cc records.cc sql_insert.cc sql_select.cc
-  sql_update.cc.
+  Read row using position
+
+  SYNOPSIS
+    rnd_pos()
+    out:buf                     Row read in MySQL Row Format
+    position                    Position of read row
+
+  RETURN VALUE
+    >0                          Error code
+    0                           Success
+
+  DESCRIPTION
+    This is like rnd_next, but you are given a position to use
+    to determine the row. The position will be of the type that you stored in
+    ref. You can use ha_get_ptr(pos,ref_length) to retrieve whatever key
+    or position you saved when position() was called.
+    Called from filesort.cc records.cc sql_insert.cc sql_select.cc
+    sql_update.cc.
 */
 
 int ha_partition::rnd_pos(byte * buf, byte *pos)
@@ -1702,7 +3209,7 @@
   handler *file;
   DBUG_ENTER("ha_partition::rnd_pos");
 
-  part_id= get_part_id_from_pos((const byte *) pos);
+  part_id= uint2korr((const byte *) pos);
   DBUG_ASSERT(part_id < m_tot_parts);
   file= m_file[part_id];
   current_partition_index= part_id;
@@ -1731,8 +3238,20 @@
 */
 
 /*
-  index_init is always called before starting index scans (except when
-  starting through index_read_idx and using read_range variants).
+  Initialise handler before start of index scan
+
+  SYNOPSIS
+    index_init()
+    inx                Index number
+    sorted             Whether rows are to be returned in sorted order
+
+  RETURN VALUE
+    >0                 Error code
+    0                  Success
+
+  DESCRIPTION
+    index_init is always called before starting index scans (except when
+    starting through index_read_idx and using read_range variants).
 */
 
 int ha_partition::index_init(uint inx, bool sorted)
@@ -1764,8 +3283,18 @@
 
 
 /*
-  index_end is called at the end of an index scan to clean up any
-  things needed to clean up.
+  End of index scan
+
+  SYNOPSIS
+    index_end()
+
+  RETURN VALUE
+    >0                 Error code
+    0                  Success
+
+  DESCRIPTION
+    index_end is called at the end of an index scan to clean up any
+    things needed to clean up.
 */
 
 int ha_partition::index_end()
@@ -1791,25 +3320,49 @@
 
 
 /*
-  index_read starts a new index scan using a start key. The MySQL Server
-  will check the end key on its own. Thus to function properly the
-  partitioned handler need to ensure that it delivers records in the sort
-  order of the MySQL Server.
-  index_read can be restarted without calling index_end on the previous
-  index scan and without calling index_init. In this case the index_read
-  is on the same index as the previous index_scan. This is particularly
-  used in conjuntion with multi read ranges.
+  Read one record in an index scan and start an index scan
+
+  SYNOPSIS
+    index_read()
+    buf                    Read row in MySQL Row Format
+    key                    Key parts in consecutive order
+    key_len                Total length of key parts
+    find_flag              What type of key condition is used
+
+  RETURN VALUE
+    >0                 Error code
+    0                  Success
+
+  DESCRIPTION
+    index_read starts a new index scan using a start key. The MySQL Server
+    will check the end key on its own. Thus to function properly the
+    partitioned handler need to ensure that it delivers records in the sort
+    order of the MySQL Server.
+    index_read can be restarted without calling index_end on the previous
+    index scan and without calling index_init. In this case the index_read
+    is on the same index as the previous index_scan. This is particularly
+    used in conjunction with multi read ranges.
 */
 
 int ha_partition::index_read(byte * buf, const byte * key,
 			     uint key_len, enum ha_rkey_function find_flag)
 {
   DBUG_ENTER("ha_partition::index_read");
+
   end_range= 0;
   DBUG_RETURN(common_index_read(buf, key, key_len, find_flag));
 }
 
 
+/*
+  Common routine for a number of index_read variants
+
+  SYNOPSIS
+    common_index_read()
+  
+  see index_read for rest
+*/
+
 int ha_partition::common_index_read(byte *buf, const byte *key, uint key_len,
 				    enum ha_rkey_function find_flag)
 {
@@ -1858,18 +3411,30 @@
 
 
 /*
-  index_first() asks for the first key in the index.
-  This is similar to index_read except that there is no start key since
-  the scan starts from the leftmost entry and proceeds forward with
-  index_next.
+  Start an index scan from leftmost record and return first record
+
+  SYNOPSIS
+    index_first()
+    buf                 Read row in MySQL Row Format
+
+  RETURN VALUE
+    >0                  Error code
+    0                   Success
+
+  DESCRIPTION
+    index_first() asks for the first key in the index.
+    This is similar to index_read except that there is no start key since
+    the scan starts from the leftmost entry and proceeds forward with
+    index_next.
 
-  Called from opt_range.cc, opt_sum.cc, sql_handler.cc,
-  and sql_select.cc.
+    Called from opt_range.cc, opt_sum.cc, sql_handler.cc,
+    and sql_select.cc.
 */
 
 int ha_partition::index_first(byte * buf)
 {
   DBUG_ENTER("ha_partition::index_first");
+
   end_range= 0;
   m_index_scan_type= partition_index_first;
   DBUG_RETURN(common_first_last(buf));
@@ -1877,25 +3442,47 @@
 
 
 /*
-  index_last() asks for the last key in the index.
-  This is similar to index_read except that there is no start key since
-  the scan starts from the rightmost entry and proceeds forward with
-  index_prev.
+  Start an index scan from rightmost record and return first record
+  
+  SYNOPSIS
+    index_last()
+    buf                 Read row in MySQL Row Format
 
-  Called from opt_range.cc, opt_sum.cc, sql_handler.cc,
-  and sql_select.cc.
+  RETURN VALUE
+    >0                  Error code
+    0                   Success
+
+  DESCRIPTION
+    index_last() asks for the last key in the index.
+    This is similar to index_read except that there is no start key since
+    the scan starts from the rightmost entry and proceeds forward with
+    index_prev.
+
+    Called from opt_range.cc, opt_sum.cc, sql_handler.cc,
+    and sql_select.cc.
 */
 
 int ha_partition::index_last(byte * buf)
 {
   DBUG_ENTER("ha_partition::index_last");
+
   m_index_scan_type= partition_index_last;
   DBUG_RETURN(common_first_last(buf));
 }
 
+/*
+  Common routine for index_first/index_last
+
+  SYNOPSIS
+    common_first_last()
+  
+  see index_first for rest
+*/
+
 int ha_partition::common_first_last(byte *buf)
 {
   int error;
+
   if ((error= partition_scan_set_up(buf, FALSE)))
     return error;
   if (!m_ordered_scan_ongoing)
@@ -1903,10 +3490,18 @@
   return handle_ordered_index_scan(buf);
 }
 
+
 /*
-  Positions an index cursor to the index specified in key. Fetches the
-  row if any.  This is only used to read whole keys.
-  TODO: Optimise this code to avoid index_init and index_end
+  Perform index read using index where always only one row is returned
+
+  SYNOPSIS
+    index_read_idx()
+    see index_read for rest of parameters and return values
+
+  DESCRIPTION
+    Positions an index cursor to the index specified in key. Fetches the
+    row if any.  This is only used to read whole keys.
+    TODO: Optimise this code to avoid index_init and index_end
 */
 
 int ha_partition::index_read_idx(byte * buf, uint index, const byte * key,
@@ -1915,32 +3510,60 @@
 {
   int res;
   DBUG_ENTER("ha_partition::index_read_idx");
+
   index_init(index, 0);
   res= index_read(buf, key, key_len, find_flag);
   index_end();
   DBUG_RETURN(res);
 }
 
+
 /*
-  This is used in join_read_last_key to optimise away an ORDER BY.
-  Can only be used on indexes supporting HA_READ_ORDER
+  Read last using key
+
+  SYNOPSIS
+    index_read_last()
+    buf                   Read row in MySQL Row Format
+    key                   Key
+    keylen                Length of key
+
+  RETURN VALUE
+    >0                    Error code
+    0                     Success
+
+  DESCRIPTION
+    This is used in join_read_last_key to optimise away an ORDER BY.
+    Can only be used on indexes supporting HA_READ_ORDER
 */
 
 int ha_partition::index_read_last(byte *buf, const byte *key, uint keylen)
 {
   DBUG_ENTER("ha_partition::index_read_last");
+
   m_ordered= TRUE;				// Safety measure
   DBUG_RETURN(index_read(buf, key, keylen, HA_READ_PREFIX_LAST));
 }
 
 
 /*
-  Used to read forward through the index.
+  Read next record in a forward index scan
+
+  SYNOPSIS
+    index_next()
+    buf                   Read row in MySQL Row Format
+
+  RETURN VALUE
+    >0                    Error code
+    0                     Success
+
+  DESCRIPTION
+    Used to read forward through the index.
 */
 
 int ha_partition::index_next(byte * buf)
 {
   DBUG_ENTER("ha_partition::index_next");
+
   /*
     TODO(low priority):
     If we want partition to work with the HANDLER commands, we
@@ -1956,13 +3579,27 @@
 
 
 /*
-  This routine is used to read the next but only if the key is the same
-  as supplied in the call.
+  Read next record with the same key as the previous read
+
+  SYNOPSIS
+    index_next_same()
+    buf                   Read row in MySQL Row Format
+    key                   Key
+    keylen                Length of key
+
+  RETURN VALUE
+    >0                    Error code
+    0                     Success
+
+  DESCRIPTION
+    This routine is used to read the next but only if the key is the same
+    as supplied in the call.
 */
 
 int ha_partition::index_next_same(byte *buf, const byte *key, uint keylen)
 {
   DBUG_ENTER("ha_partition::index_next_same");
+
   DBUG_ASSERT(keylen == m_start_key.length);
   DBUG_ASSERT(m_index_scan_type != partition_index_last);
   if (!m_ordered_scan_ongoing)
@@ -1970,13 +3607,26 @@
   DBUG_RETURN(handle_ordered_next(buf, TRUE));
 }
 
+
 /*
-  Used to read backwards through the index.
+  Read next record when performing index scan backwards
+
+  SYNOPSIS
+    index_prev()
+    buf                   Read row in MySQL Row Format
+
+  RETURN VALUE
+    >0                    Error code
+    0                     Success
+
+  DESCRIPTION
+    Used to read backwards through the index.
 */
 
 int ha_partition::index_prev(byte * buf)
 {
   DBUG_ENTER("ha_partition::index_prev");
+
   /* TODO: read comment in index_next */
   DBUG_ASSERT(m_index_scan_type != partition_index_first);
   DBUG_RETURN(handle_ordered_prev(buf));
@@ -1984,10 +3634,24 @@
 
 
 /*
-  We reimplement read_range_first since we don't want the compare_key
-  check at the end. This is already performed in the partition handler.
-  read_range_next is very much different due to that we need to scan
-  all underlying handlers.
+  Start a read of one range with start and end key
+
+  SYNOPSIS
+    read_range_first()
+    start_key           Specification of start key
+    end_key             Specification of end key
+    eq_range_arg        Is it equal range
+    sorted              Should records be returned in sorted order
+
+  RETURN VALUE
+    >0                    Error code
+    0                     Success
+
+  DESCRIPTION
+    We reimplement read_range_first since we don't want the compare_key
+    check at the end. This is already performed in the partition handler.
+    read_range_next is very much different due to that we need to scan
+    all underlying handlers.
 */
 
 int ha_partition::read_range_first(const key_range *start_key,
@@ -1996,6 +3660,7 @@
 {
   int error;
   DBUG_ENTER("ha_partition::read_range_first");
+
   m_ordered= sorted;
   eq_range= eq_range_arg;
   end_range= 0;
@@ -2024,9 +3689,21 @@
 }
 
 
+/*
+  Read next record in read of a range with start and end key
+
+  SYNOPSIS
+    read_range_next()
+
+  RETURN VALUE
+    >0                    Error code
+    0                     Success
+*/
+
 int ha_partition::read_range_next()
 {
   DBUG_ENTER("ha_partition::read_range_next");
+
   if (m_ordered)
   {
     DBUG_RETURN(handler::read_range_next());
@@ -2035,6 +3712,22 @@
 }
 
 
+/*
+  Common routine to set up scans
+
+  SYNOPSIS
+    buf                  Buffer to later return record in
+    idx_read_flag        Is it index scan
+
+  RETURN VALUE
+    >0                    Error code
+    0                     Success
+
+  DESCRIPTION
+    This is where we check which partitions to actually scan if not all
+    of them
+*/
+
 int ha_partition::partition_scan_set_up(byte * buf, bool idx_read_flag)
 {
   DBUG_ENTER("ha_partition::partition_scan_set_up");
@@ -2079,16 +3772,29 @@
   Unordered Index Scan Routines
 ****************************************************************************/
 /*
-  These routines are used to scan partitions without considering order.
-  This is performed in two situations.
-  1) In read_multi_range this is the normal case
-  2) When performing any type of index_read, index_first, index_last where
-  all fields in the partition function is bound. In this case the index
-  scan is performed on only one partition and thus it isn't necessary to
-  perform any sort.
+  Common routine to handle index_next with unordered results
+
+  SYNOPSIS
+    handle_unordered_next()
+    out:buf                       Read row in MySQL Row Format
+    next_same                     Called from index_next_same
+
+  RETURN VALUE
+    HA_ERR_END_OF_FILE            End of scan
+    0                             Success
+    other                         Error code
+
+  DESCRIPTION
+    These routines are used to scan partitions without considering order.
+    This is performed in two situations.
+    1) In read_multi_range this is the normal case
+    2) When performing any type of index_read, index_first, index_last where
+    all fields in the partition function is bound. In this case the index
+    scan is performed on only one partition and thus it isn't necessary to
+    perform any sort.
 */
 
-int ha_partition::handle_unordered_next(byte *buf, bool next_same)
+int ha_partition::handle_unordered_next(byte *buf, bool is_next_same)
 {
   handler *file= file= m_file[m_part_spec.start_part];
   int error;
@@ -2098,7 +3804,7 @@
     We should consider if this should be split into two functions as
    next_same is always a local constant
   */
-  if (next_same)
+  if (is_next_same)
   {
     if (!(error= file->index_next_same(buf, m_start_key.key,
                                        m_start_key.length)))
@@ -2127,8 +3833,20 @@
 
 
 /*
-  This routine is used to start the index scan on the next partition.
-  Both initial start and after completing scan on one partition.
+  Handle index_next when changing to new partition
+
+  SYNOPSIS
+    handle_unordered_scan_next_partition()
+    buf                       Read row in MySQL Row Format
+
+  RETURN VALUE
+    HA_ERR_END_OF_FILE            End of scan
+    0                             Success
+    other                         Error code
+
+  DESCRIPTION
+    This routine is used to start the index scan on the next partition.
+    Both initial start and after completing scan on one partition.
 */
 
 int ha_partition::handle_unordered_scan_next_partition(byte * buf)
@@ -2176,30 +3894,43 @@
 
 
 /*
-  This part contains the logic to handle index scans that require ordered
-  output. This includes all except those started by read_range_first with
-  the flag ordered set to FALSE. Thus most direct index_read and all
-  index_first and index_last.
-
-  We implement ordering by keeping one record plus a key buffer for each
-  partition. Every time a new entry is requested we will fetch a new
-  entry from the partition that is currently not filled with an entry.
-  Then the entry is put into its proper sort position.
-
-  Returning a record is done by getting the top record, copying the
-  record to the request buffer and setting the partition as empty on
-  entries.
+  Common routine to start index scan with ordered results
+
+  SYNOPSIS
+    handle_ordered_index_scan()
+    out:buf                       Read row in MySQL Row Format
+
+  RETURN VALUE
+    HA_ERR_END_OF_FILE            End of scan
+    0                             Success
+    other                         Error code
+
+  DESCRIPTION
+    This part contains the logic to handle index scans that require ordered
+    output. This includes all except those started by read_range_first with
+    the flag ordered set to FALSE. Thus most direct index_read and all
+    index_first and index_last.
+
+    We implement ordering by keeping one record plus a key buffer for each
+    partition. Every time a new entry is requested we will fetch a new
+    entry from the partition that is currently not filled with an entry.
+    Then the entry is put into its proper sort position.
+
+    Returning a record is done by getting the top record, copying the
+    record to the request buffer and setting the partition as empty on
+    entries.
 */
 
 int ha_partition::handle_ordered_index_scan(byte *buf)
 {
-  uint i, j= 0;
+  uint i;
+  uint j= 0;
   bool found= FALSE;
   bool reverse_order= FALSE;
   DBUG_ENTER("ha_partition::handle_ordered_index_scan");
 
   m_top_entry= NO_CURRENT_PART_ID;
-  queue_remove_all(&queue);
+  queue_remove_all(&m_queue);
 
   /* now we see what the index of our first important partition is */
   current_partition_index = bitmap_get_first_set(&(m_part_info->used_partitions));
@@ -2263,10 +3994,10 @@
       We found at least one partition with data, now sort all entries and
       after that read the first entry and copy it to the buffer to return in.
     */
-    queue_set_max_at_top(&queue, reverse_order);
-    queue_set_cmp_arg(&queue, (void*)m_curr_key_info);
-    queue.elements= j;
-    queue_fix(&queue);
+    queue_set_max_at_top(&m_queue, reverse_order);
+    queue_set_cmp_arg(&m_queue, (void*)m_curr_key_info);
+    m_queue.elements= j;
+    queue_fix(&m_queue);
     return_top_record(buf);
     DBUG_PRINT("info", ("Record returned from partition %d", m_top_entry));
     DBUG_RETURN(0);
@@ -2275,11 +4006,23 @@
 }
 
 
+/*
+  Return the top record in sort order
+
+  SYNOPSIS
+    return_top_record()
+    out:buf                  Row returned in MySQL Row Format
+
+  RETURN VALUE
+    NONE
+*/
+
 void ha_partition::return_top_record(byte *buf)
 {
   uint part_id;
-  byte *key_buffer= queue_top(&queue);
+  byte *key_buffer= queue_top(&m_queue);
   byte *rec_buffer= key_buffer + PARTITION_BYTES_IN_POS;
+
   part_id= uint2korr(key_buffer);
   memcpy(buf, rec_buffer, m_rec_length);
   m_last_part= part_id;
@@ -2287,14 +4030,28 @@
 }
 
 
-int ha_partition::handle_ordered_next(byte *buf, bool next_same)
+/*
+  Common routine to handle index_next with ordered results
+
+  SYNOPSIS
+    handle_ordered_next()
+    out:buf                       Read row in MySQL Row Format
+    next_same                     Called from index_next_same
+
+  RETURN VALUE
+    HA_ERR_END_OF_FILE            End of scan
+    0                             Success
+    other                         Error code
+*/
+
+int ha_partition::handle_ordered_next(byte *buf, bool is_next_same)
 {
   int error;
   uint part_id= m_top_entry;
   handler *file= m_file[part_id];
   DBUG_ENTER("ha_partition::handle_ordered_next");
 
-  if (!next_same)
+  if (!is_next_same)
     error= file->index_next(rec_buf(part_id));
   else
     error= file->index_next_same(rec_buf(part_id), m_start_key.key,
@@ -2304,8 +4061,8 @@
     if (error == HA_ERR_END_OF_FILE)
     {
       /* Return next buffered row */
-      queue_remove(&queue, (uint) 0);
-      if (queue.elements)
+      queue_remove(&m_queue, (uint) 0);
+      if (m_queue.elements)
       {
          DBUG_PRINT("info", ("Record returned from partition %u (2)",
                      m_top_entry));
@@ -2315,25 +4072,39 @@
     }
     DBUG_RETURN(error);
   }
-  queue_replaced(&queue);
+  queue_replaced(&m_queue);
   return_top_record(buf);
   DBUG_PRINT("info", ("Record returned from partition %u", m_top_entry));
   DBUG_RETURN(0);
 }
 
 
+/*
+  Common routine to handle index_prev with ordered results
+
+  SYNOPSIS
+    handle_ordered_prev()
+    out:buf                       Read row in MySQL Row Format
+
+  RETURN VALUE
+    HA_ERR_END_OF_FILE            End of scan
+    0                             Success
+    other                         Error code
+*/
+
 int ha_partition::handle_ordered_prev(byte *buf)
 {
   int error;
   uint part_id= m_top_entry;
   handler *file= m_file[part_id];
   DBUG_ENTER("ha_partition::handle_ordered_prev");
+
   if ((error= file->index_prev(rec_buf(part_id))))
   {
     if (error == HA_ERR_END_OF_FILE)
     {
-      queue_remove(&queue, (uint) 0);
-      if (queue.elements)
+      queue_remove(&m_queue, (uint) 0);
+      if (m_queue.elements)
       {
 	return_top_record(buf);
 	DBUG_PRINT("info", ("Record returned from partition %d (2)",
@@ -2343,17 +4114,34 @@
     }
     DBUG_RETURN(error);
   }
-  queue_replaced(&queue);
+  queue_replaced(&m_queue);
   return_top_record(buf);
   DBUG_PRINT("info", ("Record returned from partition %d", m_top_entry));
   DBUG_RETURN(0);
 }
 
 
+/*
+  Set fields in partition functions in read set for underlying handlers
+
+  SYNOPSIS
+    include_partition_fields_in_used_fields()
+
+  RETURN VALUE
+    NONE
+
+  DESCRIPTION
+    Some handlers only read fields as specified by the bitmap for the
+    read set. For partitioned handlers we always require that the
+    fields of the partition functions are read such that we can
+    calculate the partition id to place updated and deleted records.
+*/
+
 void ha_partition::include_partition_fields_in_used_fields()
 {
-  DBUG_ENTER("ha_partition::include_partition_fields_in_used_fields");
   Field **ptr= m_part_field_array;
+  DBUG_ENTER("ha_partition::include_partition_fields_in_used_fields");
+
   do
   {
     ha_set_bit_in_read_set((*ptr)->fieldnr);
@@ -2372,57 +4160,68 @@
 */
 
 /*
-  ::info() is used to return information to the optimizer.
-  Currently this table handler doesn't implement most of the fields
-  really needed. SHOW also makes use of this data
-  Another note, if your handler doesn't proved exact record count,
-  you will probably want to have the following in your code:
-  if (records < 2)
-    records = 2;
-  The reason is that the server will optimize for cases of only a single
-  record. If in a table scan you don't know the number of records
-  it will probably be better to set records to two so you can return
-  as many records as you need.
-
-  Along with records a few more variables you may wish to set are:
-    records
-    deleted
-    data_file_length
-    index_file_length
-    delete_length
-    check_time
-  Take a look at the public variables in handler.h for more information.
-
-  Called in:
-    filesort.cc
-    ha_heap.cc
-    item_sum.cc
-    opt_sum.cc
-    sql_delete.cc
-    sql_delete.cc
-    sql_derived.cc
-    sql_select.cc
-    sql_select.cc
-    sql_select.cc
-    sql_select.cc
-    sql_select.cc
-    sql_show.cc
-    sql_show.cc
-    sql_show.cc
-    sql_show.cc
-    sql_table.cc
-    sql_union.cc
-    sql_update.cc
-
-  Some flags that are not implemented
-    HA_STATUS_POS:
-      This parameter is never used from the MySQL Server. It is checked in a
-      place in MyISAM so could potentially be used by MyISAM specific programs.
-    HA_STATUS_NO_LOCK:
-    This is declared and often used. It's only used by MyISAM.
-    It means that MySQL doesn't need the absolute latest statistics
-    information. This may save the handler from doing internal locks while
-    retrieving statistics data.
+  General method to gather info from handler
+
+  SYNOPSIS
+    info()
+    flag              Specifies what info is requested
+
+  RETURN VALUE
+    NONE
+
+  DESCRIPTION
+    ::info() is used to return information to the optimizer.
+    Currently this table handler doesn't implement most of the fields
+    really needed. SHOW also makes use of this data
+    Another note, if your handler doesn't provide an exact record count,
+    you will probably want to have the following in your code:
+    if (records < 2)
+      records = 2;
+    The reason is that the server will optimize for cases of only a single
+    record. If in a table scan you don't know the number of records
+    it will probably be better to set records to two so you can return
+    as many records as you need.
+
+    Along with records a few more variables you may wish to set are:
+      records
+      deleted
+      data_file_length
+      index_file_length
+      delete_length
+      check_time
+    Take a look at the public variables in handler.h for more information.
+
+    Called in:
+      filesort.cc
+      ha_heap.cc
+      item_sum.cc
+      opt_sum.cc
+      sql_delete.cc
+      sql_delete.cc
+      sql_derived.cc
+      sql_select.cc
+      sql_select.cc
+      sql_select.cc
+      sql_select.cc
+      sql_select.cc
+      sql_show.cc
+      sql_show.cc
+      sql_show.cc
+      sql_show.cc
+      sql_table.cc
+      sql_union.cc
+      sql_update.cc
+
+    Some flags that are not implemented
+      HA_STATUS_POS:
+        This parameter is never used from the MySQL Server. It is checked in a
+        place in MyISAM so could potentially be used by MyISAM specific
+        programs.
+      HA_STATUS_NO_LOCK:
+        This is declared and often used. It's only used by MyISAM.
+        It means that MySQL doesn't need the absolute latest statistics
+        information. This may save the handler from doing internal locks while
+        retrieving statistics data.
 */
 
 void ha_partition::info(uint flag)
@@ -2583,7 +4382,41 @@
 }
 
 
+void ha_partition::get_dynamic_partition_info(PARTITION_INFO *stat_info,
+                                              uint part_id)
+{
+  handler *file= m_file[part_id];
+  file->info(HA_STATUS_CONST | HA_STATUS_TIME | HA_STATUS_VARIABLE |
+             HA_STATUS_NO_LOCK);
+
+  stat_info->records= file->records;
+  stat_info->mean_rec_length= file->mean_rec_length;
+  stat_info->data_file_length= file->data_file_length;
+  stat_info->max_data_file_length= file->max_data_file_length;
+  stat_info->index_file_length= file->index_file_length;
+  stat_info->delete_length= file->delete_length;
+  stat_info->create_time= file->create_time;
+  stat_info->update_time= file->update_time;
+  stat_info->check_time= file->check_time;
+  stat_info->check_sum= 0;
+  if (file->table_flags() & (ulong) HA_HAS_CHECKSUM)
+    stat_info->check_sum= file->checksum();
+  return;
+}
+
+
 /*
+  General function to prepare handler for certain behavior
+
+  SYNOPSIS
+    extra()
+    operation              Operation type for extra call
+
+  RETURN VALUE
+    >0                     Error code
+    0                      Success
+
+  DESCRIPTION
   extra() is called whenever the server wishes to send a hint to
   the storage engine. The MyISAM engine implements the most hints.
 
@@ -2929,8 +4762,18 @@
 
 
 /*
-  This will in the future be called instead of extra(HA_EXTRA_RESET) as this
-  is such a common call
+  Special extra call to reset extra parameters
+
+  SYNOPSIS
+    reset()
+
+  RETURN VALUE
+    >0                   Error code
+    0                    Success
+
+  DESCRIPTION
+    This will in the future be called instead of extra(HA_EXTRA_RESET) as this
+    is such a common call
 */
 
 int ha_partition::reset(void)
@@ -2950,15 +4793,40 @@
   DBUG_RETURN(result);
 }
 
+/*
+  Special extra method for HA_EXTRA_CACHE with cachesize as extra parameter
+
+  SYNOPSIS
+    extra_opt()
+    operation                      Must be HA_EXTRA_CACHE
+    cachesize                      Size of cache in full table scan
+
+  RETURN VALUE
+    >0                   Error code
+    0                    Success
+*/
+
 int ha_partition::extra_opt(enum ha_extra_function operation, ulong cachesize)
 {
   DBUG_ENTER("ha_partition::extra_opt()");
+
   DBUG_ASSERT(HA_EXTRA_CACHE == operation);
   prepare_extra_cache(cachesize);
   DBUG_RETURN(0);
 }
 
 
+/*
+  Call extra on handler with HA_EXTRA_CACHE and cachesize
+
+  SYNOPSIS
+    prepare_extra_cache()
+    cachesize                Size of cache for full table scan
+
+  RETURN VALUE
+    NONE
+*/
+
 void ha_partition::prepare_extra_cache(uint cachesize)
 {
   DBUG_ENTER("ha_partition::prepare_extra_cache()");
@@ -2974,11 +4842,24 @@
 }
 
 
+/*
+  Call extra on all partitions
+
+  SYNOPSIS
+    loop_extra()
+    operation             extra operation type
+
+  RETURN VALUE
+    >0                    Error code
+    0                     Success
+*/
+
 int ha_partition::loop_extra(enum ha_extra_function operation)
 {
   int result= 0, tmp;
   handler **file;
   DBUG_ENTER("ha_partition::loop_extra()");
+
   for (file= m_file; *file; file++)
   {
     if ((tmp= (*file)->extra(operation)))
@@ -2988,10 +4869,22 @@
 }
 
 
+/*
+  Call extra(HA_EXTRA_CACHE) on next partition_id
+
+  SYNOPSIS
+    late_extra_cache()
+    partition_id               Partition id to call extra on
+
+  RETURN VALUE
+    NONE
+*/
+
 void ha_partition::late_extra_cache(uint partition_id)
 {
   handler *file;
   DBUG_ENTER("ha_partition::late_extra_cache");
+
   if (!m_extra_cache)
     DBUG_VOID_RETURN;
   file= m_file[partition_id];
@@ -3003,10 +4896,22 @@
 }
 
 
+/*
+  Call extra(HA_EXTRA_NO_CACHE) on next partition_id
+
+  SYNOPSIS
+    late_extra_no_cache()
+    partition_id               Partition id to call extra on
+
+  RETURN VALUE
+    NONE
+*/
+
 void ha_partition::late_extra_no_cache(uint partition_id)
 {
   handler *file;
   DBUG_ENTER("ha_partition::late_extra_no_cache");
+
   if (!m_extra_cache)
     DBUG_VOID_RETURN;
   file= m_file[partition_id];
@@ -3019,12 +4924,34 @@
                 MODULE optimiser support
 ****************************************************************************/
 
+/*
+  Get keys to use for scanning
+
+  SYNOPSIS
+    keys_to_use_for_scanning()
+
+  RETURN VALUE
+    key_map of keys usable for scanning
+*/
+
 const key_map *ha_partition::keys_to_use_for_scanning()
 {
   DBUG_ENTER("ha_partition::keys_to_use_for_scanning");
+
   DBUG_RETURN(m_file[0]->keys_to_use_for_scanning());
 }
 
+
+/*
+  Return time for a scan of the table
+
+  SYNOPSIS
+    scan_time()
+
+  RETURN VALUE
+    time for scan
+*/
+
 double ha_partition::scan_time()
 {
   double scan_time= 0;
@@ -3038,28 +4965,53 @@
 
 
 /*
-  This will be optimised later to include whether or not the index can
-  be used with partitioning. To achieve we need to add another parameter
-  that specifies how many of the index fields that are bound in the ranges.
-  Possibly added as a new call to handlers.
+  Get time to read
+
+  SYNOPSIS
+    read_time()
+    index                Index number used
+    ranges               Number of ranges
+    rows                 Number of rows
+
+  RETURN VALUE
+    time for read
+
+  DESCRIPTION
+    This will be optimised later to include whether or not the index can
+    be used with partitioning. To achieve we need to add another parameter
+    that specifies how many of the index fields that are bound in the ranges.
+    Possibly added as a new call to handlers.
 */
 
 double ha_partition::read_time(uint index, uint ranges, ha_rows rows)
 {
   DBUG_ENTER("ha_partition::read_time");
+
   DBUG_RETURN(m_file[0]->read_time(index, ranges, rows));
 }
 
 /*
-  Given a starting key, and an ending key estimate the number of rows that
-  will exist between the two. end_key may be empty which in case determine
-  if start_key matches any rows.
-
-  Called from opt_range.cc by check_quick_keys().
-
-  monty: MUST be called for each range and added.
-	 Note that MySQL will assume that if this returns 0 there is no
-         matching rows for the range!
+  Find number of records in a range
+
+  SYNOPSIS
+    records_in_range()
+    inx                  Index number
+    min_key              Start of range
+    max_key              End of range
+
+  RETURN VALUE
+    Number of rows in range
+
+  DESCRIPTION
+    Given a starting key, and an ending key estimate the number of rows that
+    will exist between the two. end_key may be empty, in which case we determine
+    if start_key matches any rows.
+
+    Called from opt_range.cc by check_quick_keys().
+
+    monty: MUST be called for each range and added.
+          Note that MySQL will assume that if this returns 0 there is no
+          matching rows for the range!
 */
 
 ha_rows ha_partition::records_in_range(uint inx, key_range *min_key,
@@ -3080,6 +5032,16 @@
 }
 
 
+/*
+  Estimate upper bound of number of rows
+
+  SYNOPSIS
+    estimate_rows_upper_bound()
+
+  RETURN VALUE
+    Number of rows
+*/
+
 ha_rows ha_partition::estimate_rows_upper_bound()
 {
   ha_rows rows, tot_rows= 0;
@@ -3101,9 +5063,48 @@
 }
 
 
+/*
+  Is it ok to switch to a new engine for this table
+
+  SYNOPSIS
+    can_switch_engines()
+
+  RETURN VALUE
+    TRUE                  Ok
+    FALSE                 Not ok
+
+  DESCRIPTION
+    Used to ensure that tables with foreign key constraints are not moved
+    to engines without foreign key support.
+*/
+
+bool ha_partition::can_switch_engines()
+{
+  handler **file;
+  DBUG_ENTER("ha_partition::can_switch_engines");
+ 
+  file= m_file;
+  do
+  {
+    if (!(*file)->can_switch_engines())
+      DBUG_RETURN(FALSE);
+  } while (*(++file));
+  DBUG_RETURN(TRUE);
+}
+
+
+/*
+  Is table cache supported
+
+  SYNOPSIS
+    table_cache_type()
+
+  RETURN VALUE
+    Cache type of the first underlying partition handler
+*/
+
 uint8 ha_partition::table_cache_type()
 {
   DBUG_ENTER("ha_partition::table_cache_type");
+
   DBUG_RETURN(m_file[0]->table_cache_type());
 }
 
@@ -3115,6 +5116,7 @@
 const char *ha_partition::index_type(uint inx)
 {
   DBUG_ENTER("ha_partition::index_type");
+
   DBUG_RETURN(m_file[0]->index_type(inx));
 }
 
@@ -3122,8 +5124,11 @@
 void ha_partition::print_error(int error, myf errflag)
 {
   DBUG_ENTER("ha_partition::print_error");
+
   /* Should probably look for my own errors first */
   /* monty: needs to be called for the last used partition ! */
+  DBUG_PRINT("enter", ("error = %d", error));
+
   if (error == HA_ERR_NO_PARTITION_FOUND)
     my_error(ER_NO_PARTITION_FOR_GIVEN_VALUE, MYF(0),
              m_part_info->part_expr->val_int());
@@ -3136,6 +5141,7 @@
 bool ha_partition::get_error_message(int error, String *buf)
 {
   DBUG_ENTER("ha_partition::get_error_message");
+
   /* Should probably look for my own errors first */
   /* monty: needs to be called for the last used partition ! */
   DBUG_RETURN(m_file[0]->get_error_message(error, buf));
@@ -3160,7 +5166,8 @@
 { return ha_partition_ext; }
 
 
-uint ha_partition::min_of_the_max_uint(uint (handler::*operator_func)(void) const) const
+uint ha_partition::min_of_the_max_uint(
+                       uint (handler::*operator_func)(void) const) const
 {
   handler **file;
   uint min_of_the_max= ((*m_file)->*operator_func)();
@@ -3208,6 +5215,7 @@
 {
   handler **file;
   uint max= (*m_file)->extra_rec_buf_length();
+
   for (file= m_file, file++; *file; file++)
     if (max < (*file)->extra_rec_buf_length())
       max= (*file)->extra_rec_buf_length();
@@ -3219,6 +5227,7 @@
 {
   handler **file;
   uint max= (*m_file)->min_record_length(options);
+
   for (file= m_file, file++; *file; file++)
     if (max < (*file)->min_record_length(options))
       max= (*file)->min_record_length(options);
@@ -3230,10 +5239,23 @@
                 MODULE compare records
 ****************************************************************************/
 /*
-  We get two references and need to check if those records are the same.
-  If they belong to different partitions we decide that they are not
-  the same record. Otherwise we use the particular handler to decide if
-  they are the same. Sort in partition id order if not equal.
+  Compare two positions
+
+  SYNOPSIS
+    cmp_ref()
+    ref1                   First position
+    ref2                   Second position
+
+  RETURN VALUE
+    <0                     ref1 < ref2
+    0                      Equal
+    >0                     ref1 > ref2
+
+  DESCRIPTION
+    We get two references and need to check if those records are the same.
+    If they belong to different partitions we decide that they are not
+    the same record. Otherwise we use the particular handler to decide if
+    they are the same. Sort in partition id order if not equal.
 */
 
 int ha_partition::cmp_ref(const byte *ref1, const byte *ref2)
@@ -3242,9 +5264,10 @@
   my_ptrdiff_t diff1, diff2;
   handler *file;
   DBUG_ENTER("ha_partition::cmp_ref");
+
   if ((ref1[0] == ref2[0]) && (ref1[1] == ref2[1]))
   {
-    part_id= get_part_id_from_pos(ref1);
+    part_id= uint2korr(ref1);
     file= m_file[part_id];
     DBUG_ASSERT(part_id < m_tot_parts);
     DBUG_RETURN(file->cmp_ref((ref1 + PARTITION_BYTES_IN_POS),
@@ -3275,6 +5298,7 @@
 void ha_partition::restore_auto_increment()
 {
   DBUG_ENTER("ha_partition::restore_auto_increment");
+
   DBUG_VOID_RETURN;
 }
 
@@ -3289,6 +5313,7 @@
 ulonglong ha_partition::get_auto_increment()
 {
   DBUG_ENTER("ha_partition::get_auto_increment");
+
   DBUG_RETURN(m_file[0]->get_auto_increment());
 }
 
@@ -3324,6 +5349,7 @@
 /*
   Function we use in the creation of our hash to get key.
 */
+
 static byte *partition_get_key(PARTITION_SHARE *share, uint *length,
 			       my_bool not_used __attribute__ ((unused)))
 {
@@ -3337,7 +5363,6 @@
   Well, you have pieces that are used for locking, and they are needed to
   function.
 */
-
 
 static PARTITION_SHARE *get_share(const char *table_name, TABLE *table)
 {

--- 1.13/sql/ha_partition.h	2006-01-17 16:36:21 -08:00
+++ 1.14/sql/ha_partition.h	2006-01-17 16:39:19 -08:00
@@ -18,6 +18,11 @@
 #pragma interface				/* gcc class implementation */
 #endif
 
+enum partition_keywords
+{ 
+  PKW_HASH= 0, PKW_RANGE, PKW_LIST, PKW_KEY, PKW_MAXVALUE, PKW_LINEAR
+};
+
 /*
   PARTITION_SHARE is a structure that will be shared amoung all open handlers
   The partition implements the minimum of what you will probably need.
@@ -53,10 +58,15 @@
     partition_no_index_scan= 3
   };
   /* Data for the partition handler */
+  int  m_mode;                          // Open mode
+  uint m_open_test_lock;                // Open test_if_locked
   char *m_file_buffer;                  // Buffer with names
   char *m_name_buffer_ptr;		// Pointer to first partition name
   handlerton **m_engine_array;          // Array of types of the handlers
   handler **m_file;                     // Array of references to handler inst.
+  handler **m_new_file;                 // Array of references to new handlers
+  handler **m_reorged_file;             // Reorganised partitions
+  handler **m_added_file;               // Added parts kept for errors
   partition_info *m_part_info;          // local reference to partition
   byte *m_start_key_ref;                // Reference of start key in current
                                         // index scan info
@@ -64,7 +74,7 @@
   byte *m_ordered_rec_buffer;           // Row and key buffer for ord. idx scan
   KEY *m_curr_key_info;                 // Current index
   byte *m_rec0;                         // table->record[0]
-  QUEUE queue;                          // Prio queue used by sorted read
+  QUEUE m_queue;                        // Prio queue used by sorted read
   /*
     Since the partition handler is a handler on top of other handlers, it
     is necessary to keep information about what the underlying handler
@@ -75,6 +85,7 @@
   u_long m_table_flags;
   u_long m_low_byte_first;
 
+  uint m_reorged_parts;                  // Number of reorganised parts
   uint m_tot_parts;                      // Total number of partitions;
   uint m_no_locks;                        // For engines like ha_blackhole, which needs no locks
   uint m_last_part;                      // Last file that we update,write
@@ -178,21 +189,38 @@
   */
   virtual int delete_table(const char *from);
   virtual int rename_table(const char *from, const char *to);
-  virtual int create(const char *name, TABLE * form,
-		     HA_CREATE_INFO * create_info);
+  virtual int create(const char *name, TABLE *form,
+		     HA_CREATE_INFO *create_info);
   virtual int create_handler_files(const char *name);
-  virtual void update_create_info(HA_CREATE_INFO * create_info);
+  virtual void update_create_info(HA_CREATE_INFO *create_info);
   virtual char *update_table_comment(const char *comment);
+  virtual int change_partitions(HA_CREATE_INFO *create_info,
+                                const char *path,
+                                ulonglong *copied,
+                                ulonglong *deleted,
+                                const void *pack_frm_data,
+                                uint pack_frm_len);
   virtual int drop_partitions(const char *path);
+  virtual int rename_partitions(const char *path);
+  bool get_no_parts(const char *name, uint *no_parts)
+  {
+    DBUG_ENTER("ha_partition::get_no_parts");
+    *no_parts= m_tot_parts;
+    DBUG_RETURN(0);
+  }
 private:
+  int copy_partitions(ulonglong *copied, ulonglong *deleted);
+  void cleanup_new_partition(uint part_count);
+  int prepare_new_partition(TABLE *table, HA_CREATE_INFO *create_info,
+                            handler *file, const char *part_name);
   /*
     delete_table, rename_table and create uses very similar logic which
     is packed into this routine.
   */
     uint del_ren_cre_table(const char *from,
 			   const char *to= NULL,
-			   TABLE * table_arg= NULL,
-			   HA_CREATE_INFO * create_info= NULL);
+			   TABLE *table_arg= NULL,
+			   HA_CREATE_INFO *create_info= NULL);
   /*
     One method to create the table_name.par file containing the names of the
     underlying partitions, their engine and the number of partitions.
@@ -419,6 +447,8 @@
     -------------------------------------------------------------------------
   */
   virtual void info(uint);
+  void get_dynamic_partition_info(PARTITION_INFO *stat_info,
+                                  uint part_id);
   virtual int extra(enum ha_extra_function operation);
   virtual int extra_opt(enum ha_extra_function operation, ulong cachesize);
   virtual int reset(void);
@@ -651,30 +681,8 @@
     index scan module.
     (NDB)
   */
-  virtual ulong alter_table_flags(void) const
-  {
-    //return HA_ONLINE_ADD_EMPTY_PARTITION + HA_ONLINE_DROP_PARTITION;
-    return HA_ONLINE_DROP_PARTITION;
-  }
   virtual ulong table_flags() const
   { return m_table_flags; }
-  /*
-    HA_CAN_PARTITION:
-    Used by storage engines that can handle partitioning without this
-    partition handler
-    (Partition, NDB)
-
-    HA_CAN_UPDATE_PARTITION_KEY:
-    Set if the handler can update fields that are part of the partition
-    function.
-
-    HA_CAN_PARTITION_UNIQUE:
-    Set if the handler can handle unique indexes where the fields of the
-    unique key are not part of the fields of the partition function. Thus
-    a unique key can be set on all fields.
-  */
-  virtual ulong partition_flags() const
-  { return HA_CAN_PARTITION; }
 
   /*
     This is a bitmap of flags that says how the storage engine
@@ -838,6 +846,8 @@
     description of how the CREATE TABLE part to define FOREIGN KEY's is done.
     free_foreign_key_create_info is used to free the memory area that provided
     this description.
+    can_switch_engines checks if it is ok to switch to a new engine based on
+    the foreign key info in the table.
     -------------------------------------------------------------------------
 
     virtual char* get_foreign_key_create_info()
@@ -847,7 +857,7 @@
     List<FOREIGN_KEY_INFO> *f_key_list)
     virtual uint referenced_by_foreign_key()
   */
-
+    virtual bool can_switch_engines();
   /*
     -------------------------------------------------------------------------
     MODULE fulltext index
@@ -896,16 +906,35 @@
     -------------------------------------------------------------------------
     MODULE admin MyISAM
     -------------------------------------------------------------------------
+
+    -------------------------------------------------------------------------
+      OPTIMIZE TABLE, CHECK TABLE, ANALYZE TABLE and REPAIR TABLE are
+      mapped to a routine that handles looping over a given set of
+      partitions and those routines send a flag indicating to execute on
+      all partitions.
+    -------------------------------------------------------------------------
+  */
+    virtual int optimize(THD* thd, HA_CHECK_OPT *check_opt);
+    virtual int analyze(THD* thd, HA_CHECK_OPT *check_opt);
+    virtual int check(THD* thd, HA_CHECK_OPT *check_opt);
+    virtual int repair(THD* thd, HA_CHECK_OPT *check_opt);
+    virtual int optimize_partitions(THD *thd);
+    virtual int analyze_partitions(THD *thd);
+    virtual int check_partitions(THD *thd);
+    virtual int repair_partitions(THD *thd);
+
+    private:
+    int handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt,
+                              uint flags, bool all_parts);
+    public:
+  /*
+    -------------------------------------------------------------------------
     Admin commands not supported currently (almost purely MyISAM routines)
     This means that the following methods are not implemented:
     -------------------------------------------------------------------------
 
-    virtual int check(THD* thd, HA_CHECK_OPT *check_opt);
     virtual int backup(TD* thd, HA_CHECK_OPT *check_opt);
     virtual int restore(THD* thd, HA_CHECK_OPT *check_opt);
-    virtual int repair(THD* thd, HA_CHECK_OPT *check_opt);
-    virtual int optimize(THD* thd, HA_CHECK_OPT *check_opt);
-    virtual int analyze(THD* thd, HA_CHECK_OPT *check_opt);
     virtual int assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt);
     virtual int preload_keys(THD *thd, HA_CHECK_OPT *check_opt);
     virtual bool check_and_repair(THD *thd);

--- 1.23/sql/sql_partition.cc	2006-01-17 09:35:03 -08:00
+++ 1.24/sql/sql_partition.cc	2006-01-17 16:39:19 -08:00
@@ -108,6 +108,18 @@
 uint32 get_partition_id_key_sub(partition_info *part_info); 
 uint32 get_partition_id_linear_hash_sub(partition_info *part_info); 
 uint32 get_partition_id_linear_key_sub(partition_info *part_info); 
+
+
+/*
+  Hash get_key callback returning a partition's name, used for storing
+  partition names in order to check if a partition exists for a table
+*/
+static byte *partition_names_get_key(partition_element *partition_element, uint *length,
+			       my_bool not_used __attribute__ ((unused)))
+{
+  *length= strlen(partition_element->partition_name);
+  return (byte *) partition_element->partition_name;
+}
+
 #endif
 
 
@@ -136,6 +148,134 @@
 }
 
 #ifdef WITH_PARTITION_STORAGE_ENGINE
+
+/*
+  Populate partition name hash
+
+  SYNOPSIS
+   populate_partition_name_hash()
+   (no parameters)
+
+  DESCRIPTION
+   This method will iterate over the list of partition elements and insert
+   them into a hash based on partition name.  This hash sits on the
+   partition info object but is used by the handler for determining if a
+   partition specified in a query actually exists.
+
+  RETURN
+   1 on error
+   0 if successful
+
+*/
+int populate_partition_name_hash(partition_info *m_part_info)
+{
+  List_iterator_fast <partition_element> part_it(m_part_info->partitions);
+
+  (void) hash_init(&(m_part_info->partition_names), system_charset_info, 32, 0, 0,
+                   (hash_get_key) partition_names_get_key, 0, 0);
+
+  int index= 0;
+  partition_element *el;
+  while ((el= part_it++) != NULL)
+  {
+    bool has_sub_parts= is_sub_partitioned(m_part_info);
+
+    if (! has_sub_parts)
+	    el->index= index++;
+    if (my_hash_insert(&(m_part_info->partition_names), (byte*)el))
+      return 1;
+    if (!has_sub_parts)
+      continue;
+    List_iterator_fast <partition_element> sub_part_it(el->subpartitions);
+    partition_element *subel;
+    while ((subel= sub_part_it++) != NULL)
+    {
+      subel->index= index++;
+      if (my_hash_insert(&(m_part_info->partition_names), (byte*)subel))
+        return 1;
+    }
+  }
+  return 0;
+}
+/*
+
+  Check if a partition name exists in the table
+
+  SYNOPSIS
+    set_specific_partition()
+    bitmap         - bitmap over the table's partitions, to be updated
+    m_part_info    - partition info holding the partition-name hash
+    partition_name - string of partition name
+
+  DESCRIPTION
+    This function takes the given partition name and looks it up in the
+    hash of partition and subpartition names, setting the bitmap bit(s)
+    for the chosen partition (or all of its subpartitions)
+
+  RETURN
+    error code if no partition with the given name exists
+    0 if successful (partition found and bitmap updated)
+*/
+int set_specific_partition(MY_BITMAP *bitmap, partition_info *m_part_info,
+                                         const char *partition_name)
+{
+  int error;
+  DBUG_ENTER("set_specific_partition");
+  partition_element *el= (partition_element*)hash_search(
+	                                 &(m_part_info->partition_names),
+	                                 (byte*)partition_name,
+									                  strlen(partition_name));
+  if (el == NULL)
+  {
+    error= my_error(ER_NO_SUCH_PARTITION, MYF(0), partition_name);
+    DBUG_RETURN(error);
+  }
+
+  DBUG_PRINT("info", ("selected partition %s is in table", partition_name));
+  bitmap_clear_all(bitmap);
+
+  if (is_sub_partitioned(m_part_info) && (el->subpartitions.elements > 0))
+  {
+    List_iterator<partition_element> sub_part_it(el->subpartitions);
+    partition_element *sub_el;
+    while ((sub_el= sub_part_it++) != NULL)
+    {
+      bitmap_set_bit(bitmap, sub_el->index);
+    }
+  }
+  else
+    bitmap_set_bit(bitmap, el->index);
+
+  DBUG_RETURN(0);
+}
+
+/*
+  Return numeric partition number given a partition name
+
+  SYNOPSIS
+    get_partition_index()
+    partition_name - string of partition name
+
+  DESCRIPTION
+   This method returns a partition number based on a given partition
+   name by keying the hash with that name, and then returning the
+   partition_element index.
+
+  RETURN
+   -1 if error
+   >= 0 integer representing the index number
+*/
+int get_partition_index(partition_info *m_part_info,
+                        const char* partition_name)
+{
+  partition_element *el= (partition_element*)hash_search(
+                                         &(m_part_info->partition_names),
+	                                 (byte*)partition_name,
+                                         strlen(partition_name));
+  if (el == NULL)
+    return -1;
+  return el->index;
+}
+
+
 /*
   A support function to check if a name is in a list of strings
 
Thread
bk commit into 5.1 tree (patg:1.2022)Patrick Galbraith18 Jan