List: Commits « Previous Message | Next Message »
From: Sergey Vojtovich    Date: October 7, 2011, 6:12am
Subject:bzr push into mysql-trunk branch (sergey.vojtovich:3476 to 3477)
View as plain text  
 3477 Sergey Vojtovich	2011-10-07 [merge]
      Merge.

    added:
      mysql-test/std_data/bug48633.ARM
      mysql-test/std_data/bug48633.ARZ
      mysql-test/std_data/bug48633.frm
    modified:
      mysql-test/r/archive.result
      mysql-test/t/archive.test
      mysql-test/valgrind.supp
      sql/table.cc
      storage/archive/azio.c
      storage/archive/ha_archive.cc
      storage/archive/ha_archive.h
 3476 Marc Alff	2011-10-06
      Merge cleanup
      
      Applied dos2unix for whitespace

    modified:
      storage/perfschema/table_ews_by_thread_by_event_name.cc
      storage/perfschema/table_socket_summary_by_event_name.cc
      storage/perfschema/table_socket_summary_by_instance.cc
      storage/perfschema/table_socket_summary_by_instance.h
=== modified file 'mysql-test/r/archive.result'
--- a/mysql-test/r/archive.result	2011-03-03 09:22:17 +0000
+++ b/mysql-test/r/archive.result	2011-10-06 13:51:28 +0000
@@ -12729,15 +12729,17 @@ id	id	name	name
 2	2	a	b
 DROP TABLE t1,t2;
 SHOW CREATE TABLE t1;
-ERROR HY000: Table upgrade required. Please do "REPAIR TABLE `t1`" or dump/reload to fix it!
+Table	Create Table
+t1	CREATE TABLE `t1` (
+  `col1` int(11) DEFAULT NULL,
+  `col2` varchar(20) DEFAULT NULL
+) ENGINE=ARCHIVE DEFAULT CHARSET=latin1
 SELECT * FROM t1;
-ERROR HY000: Table upgrade required. Please do "REPAIR TABLE `t1`" or dump/reload to fix it!
+col1	col2
 INSERT INTO t1 (col1, col2) VALUES (1, "value");
-ERROR HY000: Table upgrade required. Please do "REPAIR TABLE `t1`" or dump/reload to fix it!
 REPAIR TABLE t1;
 Table	Op	Msg_type	Msg_text
-test.t1	repair	Error	Table upgrade required. Please do "REPAIR TABLE `t1`" or dump/reload to fix it!
-test.t1	repair	error	Corrupt
+test.t1	repair	status	OK
 DROP TABLE t1;
 #
 # BUG#48757 - missing .ARZ file causes server crash
@@ -12823,3 +12825,59 @@ a	b	c	d	e	f
 -1	b	c	d	e	1
 DROP TABLE t1;
 SET sort_buffer_size=DEFAULT;
+#
+# BUG#11756687 - 48633: ARCHIVE TABLES ARE NOT UPGRADEABLE
+#
+SHOW CREATE TABLE t1;
+Table	Create Table
+t1	CREATE TABLE `t1` (
+  `a` int(11) DEFAULT NULL,
+  `b` text,
+  `c` varchar(255) DEFAULT NULL,
+  `d` blob,
+  `e` blob
+) ENGINE=ARCHIVE DEFAULT CHARSET=latin1
+SELECT * FROM t1;
+a	b	c	d	e
+1	text	varchar	blob1	blob2
+2	text	varchar	blob1	blob2
+SELECT * FROM t1;
+a	b	c	d	e
+1	text	varchar	blob1	blob2
+2	text	varchar	blob1	blob2
+FLUSH TABLE t1;
+SELECT * FROM t1;
+a	b	c	d	e
+1	text	varchar	blob1	blob2
+2	text	varchar	blob1	blob2
+CHECK TABLE t1;
+Table	Op	Msg_type	Msg_text
+test.t1	check	error	Table upgrade required. Please do "REPAIR TABLE `t1`" or dump/reload to fix it!
+SELECT * FROM t1;
+a	b	c	d	e
+1	text	varchar	blob1	blob2
+2	text	varchar	blob1	blob2
+INSERT INTO t1 VALUES(3, 'text', 'varchar', 'blob1', 'blob2');
+SELECT * FROM t1;
+a	b	c	d	e
+1	text	varchar	blob1	blob2
+2	text	varchar	blob1	blob2
+3	text	varchar	blob1	blob2
+FLUSH TABLE t1;
+SELECT * FROM t1;
+a	b	c	d	e
+1	text	varchar	blob1	blob2
+2	text	varchar	blob1	blob2
+3	text	varchar	blob1	blob2
+REPAIR TABLE t1;
+Table	Op	Msg_type	Msg_text
+test.t1	repair	status	OK
+SELECT * FROM t1;
+a	b	c	d	e
+1	text	varchar	blob1	blob2
+2	text	varchar	blob1	blob2
+3	text	varchar	blob1	blob2
+CHECK TABLE t1;
+Table	Op	Msg_type	Msg_text
+test.t1	check	status	OK
+DROP TABLE t1;

=== added file 'mysql-test/std_data/bug48633.ARM'
Binary files a/mysql-test/std_data/bug48633.ARM	1970-01-01 00:00:00 +0000 and b/mysql-test/std_data/bug48633.ARM	2011-10-06 13:51:28 +0000 differ

=== added file 'mysql-test/std_data/bug48633.ARZ'
Binary files a/mysql-test/std_data/bug48633.ARZ	1970-01-01 00:00:00 +0000 and b/mysql-test/std_data/bug48633.ARZ	2011-10-06 13:51:28 +0000 differ

=== added file 'mysql-test/std_data/bug48633.frm'
Binary files a/mysql-test/std_data/bug48633.frm	1970-01-01 00:00:00 +0000 and b/mysql-test/std_data/bug48633.frm	2011-10-06 13:51:28 +0000 differ

=== modified file 'mysql-test/t/archive.test'
--- a/mysql-test/t/archive.test	2011-03-03 09:22:17 +0000
+++ b/mysql-test/t/archive.test	2011-10-06 13:51:28 +0000
@@ -12,6 +12,7 @@ DROP TABLE if exists t1,t2,t3,t4,t5,t6;
 --enable_warnings
 
 SET default_storage_engine=ARCHIVE;
+let $MYSQLD_DATADIR= `SELECT @@datadir`;
 
 CREATE TABLE t1 (
   Period smallint(4) unsigned zerofill DEFAULT '0000' NOT NULL,
@@ -1641,18 +1642,14 @@ DROP TABLE t1,t2;
 #
 # BUG#47012 archive tables are not upgradeable, and server crashes on any access
 #
-let $MYSQLD_DATADIR= `SELECT @@datadir`;
 copy_file std_data/bug47012.frm $MYSQLD_DATADIR/test/t1.frm;
 copy_file std_data/bug47012.ARZ $MYSQLD_DATADIR/test/t1.ARZ;
 copy_file std_data/bug47012.ARM $MYSQLD_DATADIR/test/t1.ARM;
 
---error ER_TABLE_NEEDS_UPGRADE
 SHOW CREATE TABLE t1;
 
---error ER_TABLE_NEEDS_UPGRADE
 SELECT * FROM t1;
 
---error ER_TABLE_NEEDS_UPGRADE
 INSERT INTO t1 (col1, col2) VALUES (1, "value");
 
 REPAIR TABLE t1;
@@ -1715,7 +1712,6 @@ CREATE TABLE t1 (a int) ENGINE=ARCHIVE;
 SHOW CREATE TABLE t1;
 INSERT INTO t1 VALUES (1);
 OPTIMIZE TABLE t1;
-let $MYSQLD_DATADIR= `select @@datadir`;
 remove_file $MYSQLD_DATADIR/test/t1.frm;
 FLUSH TABLES;
 INSERT INTO t1 VALUES (2);
@@ -1745,3 +1741,33 @@ INSERT INTO t1 SELECT t1.* FROM t1,t1 t2
 SELECT * FROM t1 ORDER BY f LIMIT 1;
 DROP TABLE t1;
 SET sort_buffer_size=DEFAULT;
+
+--echo #
+--echo # BUG#11756687 - 48633: ARCHIVE TABLES ARE NOT UPGRADEABLE
+--echo #
+copy_file std_data/bug48633.frm $MYSQLD_DATADIR/test/t1.frm;
+copy_file std_data/bug48633.ARZ $MYSQLD_DATADIR/test/t1.ARZ;
+copy_file std_data/bug48633.ARM $MYSQLD_DATADIR/test/t1.ARM;
+SHOW CREATE TABLE t1;
+# Test first table scan
+SELECT * FROM t1;
+# Test second table scan
+SELECT * FROM t1;
+# Test table close
+FLUSH TABLE t1;
+SELECT * FROM t1;
+# Test check
+CHECK TABLE t1;
+SELECT * FROM t1;
+# Test insert
+INSERT INTO t1 VALUES(3, 'text', 'varchar', 'blob1', 'blob2');
+SELECT * FROM t1;
+# Test table close after insert
+FLUSH TABLE t1;
+SELECT * FROM t1;
+# Test repair
+REPAIR TABLE t1;
+SELECT * FROM t1;
+# Test check table after upgrade
+CHECK TABLE t1;
+DROP TABLE t1;

=== modified file 'mysql-test/valgrind.supp'
--- a/mysql-test/valgrind.supp	2011-09-19 14:08:30 +0000
+++ b/mysql-test/valgrind.supp	2011-10-07 06:03:14 +0000
@@ -281,6 +281,16 @@
    fun:do_flush
 }
 
+{
+  libz deflate4
+  Memcheck:Param
+  write(buf)
+  fun:*
+  fun:my_write
+  fun:do_flush
+  fun:azclose
+}
+
 #
 # Warning from my_thread_init becasue mysqld dies before kill thread exists
 #

=== modified file 'sql/table.cc'
--- a/sql/table.cc	2011-09-26 13:48:06 +0000
+++ b/sql/table.cc	2011-10-07 06:03:14 +0000
@@ -1173,7 +1173,7 @@ static int open_binary_frm(THD *thd, TAB
     }
     else
 #endif
-    if (share->mysql_version >= 50110)
+    if (share->mysql_version >= 50110 && next_chunk < buff_end)
     {
       /* New auto_partitioned indicator introduced in 5.1.11 */
 #ifdef WITH_PARTITION_STORAGE_ENGINE

=== modified file 'storage/archive/azio.c'
--- a/storage/archive/azio.c	2011-06-03 07:49:05 +0000
+++ b/storage/archive/azio.c	2011-10-06 13:51:28 +0000
@@ -71,6 +71,7 @@ int az_open (azio_stream *s, const char 
   s->version = (unsigned char)az_magic[1]; /* this needs to be a define to version */
   s->minor_version= (unsigned char) az_magic[2]; /* minor version */
   s->dirty= AZ_STATE_CLEAN;
+  s->start= 0;
 
   /*
     We do our own version of append by nature. 
@@ -169,6 +170,9 @@ int write_header(azio_stream *s)
   char buffer[AZHEADER_SIZE + AZMETA_BUFFER_SIZE];
   char *ptr= buffer;
 
+  if (s->version == 1)
+    return 0;
+
   s->block_size= AZ_BUFSIZE_WRITE;
   s->version = (unsigned char)az_magic[1];
   s->minor_version = (unsigned char)az_magic[2];
@@ -290,9 +294,9 @@ void check_header(azio_stream *s)
   /* Peek ahead to check the gzip magic header */
   if ( s->stream.next_in[0] == gz_magic[0]  && s->stream.next_in[1] == gz_magic[1])
   {
+    read_header(s, s->stream.next_in);
     s->stream.avail_in -= 2;
     s->stream.next_in += 2;
-    s->version= (unsigned char)2;
 
     /* Check the rest of the gzip header */
     method = get_byte(s);
@@ -321,7 +325,8 @@ void check_header(azio_stream *s)
       for (len = 0; len < 2; len++) (void)get_byte(s);
     }
     s->z_err = s->z_eof ? Z_DATA_ERROR : Z_OK;
-    s->start = my_tell(s->file, MYF(0)) - s->stream.avail_in;
+    if (!s->start)
+      s->start= my_tell(s->file, MYF(0)) - s->stream.avail_in;
   }
   else if ( s->stream.next_in[0] == az_magic[0]  && s->stream.next_in[1] == az_magic[1])
   {
@@ -365,9 +370,11 @@ void read_header(azio_stream *s, unsigne
   else if (buffer[0] == gz_magic[0]  && buffer[1] == gz_magic[1])
   {
     /*
-      Set version number to previous version (2).
+      Set version number to previous version (1).
     */
-    s->version= (unsigned char) 2;
+    s->version= 1;
+    s->auto_increment= 0;
+    s->frm_length= 0;
   } else {
     /*
       Unknown version.

=== modified file 'storage/archive/ha_archive.cc'
--- a/storage/archive/ha_archive.cc	2011-09-21 11:01:41 +0000
+++ b/storage/archive/ha_archive.cc	2011-10-07 06:03:14 +0000
@@ -93,6 +93,11 @@
      inserts a lot faster, but would mean highly arbitrary reads.
 
     -Brian
+
+  Archive file format versions:
+  <5.1.5 - v.1
+  5.1.5-5.1.15 - v.2
+  >5.1.15 - v.3
 */
 
 /* Variables for archive share methods */
@@ -104,6 +109,14 @@ static HASH archive_open_tables;
 #define ARN ".ARN"               // Files used during an optimize call
 #define ARM ".ARM"               // Meta file (deprecated)
 
+/* 5.0 compatibility */
+#define META_V1_OFFSET_CHECK_HEADER  0
+#define META_V1_OFFSET_VERSION       1
+#define META_V1_OFFSET_ROWS_RECORDED 2
+#define META_V1_OFFSET_CHECK_POINT   10
+#define META_V1_OFFSET_CRASHED       18
+#define META_V1_LENGTH               19
+
 /*
   uchar + uchar
 */
@@ -281,6 +294,106 @@ err:
   DBUG_RETURN(1);
 }
 
+
+/**
+  @brief Read version 1 meta file (5.0 compatibility routine).
+
+  @return Completion status
+    @retval  0 Success
+    @retval !0 Failure
+*/
+
+int ha_archive::read_v1_metafile()
+{
+  char file_name[FN_REFLEN];
+  uchar buf[META_V1_LENGTH];
+  File fd;
+  DBUG_ENTER("ha_archive::read_v1_metafile");
+
+  fn_format(file_name, share->data_file_name, "", ARM, MY_REPLACE_EXT);
+  if ((fd= my_open(file_name, O_RDONLY, MYF(0))) == -1)
+    DBUG_RETURN(-1);
+
+  if (my_read(fd, buf, sizeof(buf), MYF(0)) != sizeof(buf))
+  {
+    my_close(fd, MYF(0));
+    DBUG_RETURN(-1);
+  }
+  
+  share->rows_recorded= uint8korr(buf + META_V1_OFFSET_ROWS_RECORDED);
+  share->crashed= buf[META_V1_OFFSET_CRASHED];
+  my_close(fd, MYF(0));
+  DBUG_RETURN(0);
+}
+
+
+/**
+  @brief Write version 1 meta file (5.0 compatibility routine).
+
+  @return Completion status
+    @retval  0 Success
+    @retval !0 Failure
+*/
+
+int ha_archive::write_v1_metafile()
+{
+  char file_name[FN_REFLEN];
+  uchar buf[META_V1_LENGTH];
+  File fd;
+  DBUG_ENTER("ha_archive::write_v1_metafile");
+
+  buf[META_V1_OFFSET_CHECK_HEADER]= ARCHIVE_CHECK_HEADER;
+  buf[META_V1_OFFSET_VERSION]= 1;
+  int8store(buf + META_V1_OFFSET_ROWS_RECORDED, share->rows_recorded);
+  int8store(buf + META_V1_OFFSET_CHECK_POINT, (ulonglong) 0);
+  buf[META_V1_OFFSET_CRASHED]= share->crashed;
+  
+  fn_format(file_name, share->data_file_name, "", ARM, MY_REPLACE_EXT);
+  if ((fd= my_open(file_name, O_WRONLY, MYF(0))) == -1)
+    DBUG_RETURN(-1);
+
+  if (my_write(fd, buf, sizeof(buf), MYF(0)) != sizeof(buf))
+  {
+    my_close(fd, MYF(0));
+    DBUG_RETURN(-1);
+  }
+  
+  my_close(fd, MYF(0));
+  DBUG_RETURN(0);
+}
+
+
+/**
+  @brief Pack version 1 row (5.0 compatibility routine).
+
+  @param[in]  record  the record to pack
+
+  @return Length of packed row
+*/
+
+unsigned int ha_archive::pack_row_v1(uchar *record)
+{
+  uint *blob, *end;
+  uchar *pos;
+  DBUG_ENTER("pack_row_v1");
+  memcpy(record_buffer->buffer, record, table->s->reclength);
+  pos= record_buffer->buffer + table->s->reclength;
+  for (blob= table->s->blob_field, end= blob + table->s->blob_fields;
+       blob != end; blob++)
+  {
+    uint32 length= ((Field_blob *) table->field[*blob])->get_length();
+    if (length)
+    {
+      uchar *data_ptr;
+      ((Field_blob *) table->field[*blob])->get_ptr(&data_ptr);
+      memcpy(pos, data_ptr, length);
+      pos+= length;
+    }
+  }
+  DBUG_RETURN(pos - record_buffer->buffer);
+}
+
+
 /*
   This method reads the header of a datafile and returns whether or not it was successful.
 */
@@ -390,12 +503,8 @@ ARCHIVE_SHARE *ha_archive::get_share(con
     stats.auto_increment_value= archive_tmp.auto_increment + 1;
     share->rows_recorded= (ha_rows)archive_tmp.rows;
     share->crashed= archive_tmp.dirty;
-    /*
-      If archive version is less than 3, It should be upgraded before
-      use.
-    */
-    if (archive_tmp.version < ARCHIVE_VERSION)
-      *rc= HA_ERR_TABLE_NEEDS_UPGRADE;
+    if (archive_tmp.version == 1)
+      read_v1_metafile();
     azclose(&archive_tmp);
 
     (void) my_hash_insert(&archive_open_tables, (uchar*) share);
@@ -441,6 +550,8 @@ int ha_archive::free_share()
     */
     if (share->archive_write_open)
     {
+      if (share->archive_write.version == 1)
+        write_v1_metafile();
       if (azclose(&(share->archive_write)))
         rc= 1;
     }
@@ -527,13 +638,7 @@ int ha_archive::open(const char *name, i
                       (open_options & HA_OPEN_FOR_REPAIR) ? "yes" : "no"));
   share= get_share(name, &rc);
 
- /*
-    Allow open on crashed table in repair mode only.
-    Block open on 5.0 ARCHIVE table. Though we have almost all
-    routines to access these tables, they were not well tested.
-    For now we have to refuse to open such table to avoid
-    potential data loss.
-  */
+  /* Allow open on crashed table in repair mode only. */
   switch (rc)
   {
   case 0:
@@ -541,8 +646,6 @@ int ha_archive::open(const char *name, i
   case HA_ERR_CRASHED_ON_USAGE:
     if (open_options & HA_OPEN_FOR_REPAIR)
       break;
-    /* fall through */
-  case HA_ERR_TABLE_NEEDS_UPGRADE:
     free_share();
     /* fall through */
   default:
@@ -610,11 +713,40 @@ int ha_archive::close(void)
 }
 
 
+void ha_archive::frm_load(const char *name, azio_stream *dst)
+{
+  char name_buff[FN_REFLEN];
+  MY_STAT file_stat;
+  File frm_file;
+  uchar *frm_ptr;
+  DBUG_ENTER("ha_archive::frm_load");
+  fn_format(name_buff, name, "", ".frm", MY_REPLACE_EXT | MY_UNPACK_FILENAME);
+
+  /* Here is where we open up the frm and pass it to archive to store */
+  if ((frm_file= my_open(name_buff, O_RDONLY, MYF(0))) >= 0)
+  {
+    if (!mysql_file_fstat(frm_file, &file_stat, MYF(MY_WME)))
+    {
+      frm_ptr= (uchar *) my_malloc(sizeof(uchar) * file_stat.st_size, MYF(0));
+      if (frm_ptr)
+      {
+        if (my_read(frm_file, frm_ptr, file_stat.st_size, MYF(0)) ==
+            (size_t) file_stat.st_size)
+          azwrite_frm(dst, (char *) frm_ptr, file_stat.st_size);
+        my_free(frm_ptr);
+      }
+    }
+    my_close(frm_file, MYF(0));
+  }
+  DBUG_VOID_RETURN;
+}
+
+
 /**
   Copy a frm blob between streams.
 
-  @param  src   The source stream.
-  @param  dst   The destination stream.
+  @param[in]  src   The source stream.
+  @param[in]  dst   The destination stream.
 
   @return Zero on success, non-zero otherwise.
 */
@@ -624,6 +756,13 @@ int ha_archive::frm_copy(azio_stream *sr
   int rc= 0;
   char *frm_ptr;
 
+  /* If there is no .frm in source stream, try to read .frm from file. */
+  if (!src->frm_length)
+  {
+    frm_load(table->s->normalized_path.str, dst);
+    return 0;
+  }
+
   if (!(frm_ptr= (char *) my_malloc(src->frm_length, MYF(0))))
     return HA_ERR_OUT_OF_MEM;
 
@@ -654,9 +793,7 @@ int ha_archive::create(const char *name,
   char linkname[FN_REFLEN];
   int error;
   azio_stream create_stream;            /* Archive file we are working with */
-  File frm_file;                   /* File handler for readers */
   MY_STAT file_stat;  // Stat information for the data file
-  uchar *frm_ptr;
 
   DBUG_ENTER("ha_archive::create");
 
@@ -716,26 +853,8 @@ int ha_archive::create(const char *name,
 
     if (linkname[0])
       my_symlink(name_buff, linkname, MYF(0));
-    fn_format(name_buff, name, "", ".frm",
-              MY_REPLACE_EXT | MY_UNPACK_FILENAME);
 
-    /*
-      Here is where we open up the frm and pass it to archive to store 
-    */
-    if ((frm_file= my_open(name_buff, O_RDONLY, MYF(0))) > 0)
-    {
-      if (!mysql_file_fstat(frm_file, &file_stat, MYF(MY_WME)))
-      {
-        frm_ptr= (uchar *)my_malloc(sizeof(uchar) * file_stat.st_size, MYF(0));
-        if (frm_ptr)
-        {
-          my_read(frm_file, frm_ptr, file_stat.st_size, MYF(0));
-          azwrite_frm(&create_stream, (char *)frm_ptr, file_stat.st_size);
-          my_free(frm_ptr);
-        }
-      }
-      my_close(frm_file, MYF(0));
-    }
+    frm_load(name, &create_stream);
 
     if (create_info->comment.str)
       azwrite_comment(&create_stream, create_info->comment.str, 
@@ -829,6 +948,9 @@ unsigned int ha_archive::pack_row(uchar 
   if (fix_rec_buff(max_row_length(record)))
     DBUG_RETURN(HA_ERR_OUT_OF_MEM); /* purecov: inspected */
 
+  if (share->archive_write.version == 1)
+    DBUG_RETURN(pack_row_v1(record));
+
   /* Copy null bits */
   memcpy(record_buffer->buffer+ARCHIVE_ROW_HEADER_SIZE, 
          record, table->s->null_bytes);
@@ -1382,6 +1504,8 @@ int ha_archive::optimize(THD* thd, HA_CH
   // now we close both our writer and our reader for the rename
   if (share->archive_write_open)
   {
+    if (share->archive_write.version == 1)
+      write_v1_metafile();
     azclose(&(share->archive_write));
     share->archive_write_open= FALSE;
   }
@@ -1664,6 +1788,29 @@ bool ha_archive::is_crashed() const 
   DBUG_RETURN(share->crashed); 
 }
 
+
+/**
+  @brief Check for upgrade
+
+  @param[in]  check_opt  check options
+
+  @return Completion status
+    @retval HA_ADMIN_OK            No upgrade required
+    @retval HA_ADMIN_CORRUPT       Cannot read meta-data
+    @retval HA_ADMIN_NEEDS_UPGRADE Upgrade required
+*/
+
+int ha_archive::check_for_upgrade(HA_CHECK_OPT *check_opt)
+{
+  DBUG_ENTER("ha_archive::check_for_upgrade");
+  if (init_archive_reader())
+    DBUG_RETURN(HA_ADMIN_CORRUPT);
+  if (archive.version < ARCHIVE_VERSION)
+    DBUG_RETURN(HA_ADMIN_NEEDS_UPGRADE);
+  DBUG_RETURN(HA_ADMIN_OK);
+}
+
+
 /*
   Simple scan of the tables to make sure everything is ok.
 */
@@ -1677,9 +1824,12 @@ int ha_archive::check(THD* thd, HA_CHECK
 
   old_proc_info= thd_proc_info(thd, "Checking table");
   /* Flush any waiting data */
-  mysql_mutex_lock(&share->mutex);
-  azflush(&(share->archive_write), Z_SYNC_FLUSH);
-  mysql_mutex_unlock(&share->mutex);
+  if (share->archive_write_open)
+  {
+    mysql_mutex_lock(&share->mutex);
+    azflush(&(share->archive_write), Z_SYNC_FLUSH);
+    mysql_mutex_unlock(&share->mutex);
+  }
 
   if (init_archive_reader())
     DBUG_RETURN(HA_ADMIN_CORRUPT);

=== modified file 'storage/archive/ha_archive.h'
--- a/storage/archive/ha_archive.h	2011-06-30 15:50:45 +0000
+++ b/storage/archive/ha_archive.h	2011-10-07 06:03:14 +0000
@@ -72,6 +72,10 @@ class ha_archive: public handler
   archive_record_buffer *create_record_buffer(unsigned int length);
   void destroy_record_buffer(archive_record_buffer *r);
   int frm_copy(azio_stream *src, azio_stream *dst);
+  void frm_load(const char *name, azio_stream *dst);
+  int read_v1_metafile();
+  int write_v1_metafile();
+  unsigned int pack_row_v1(uchar *record);
 
 public:
   ha_archive(handlerton *hton, TABLE_SHARE *table_arg);
@@ -139,6 +143,7 @@ public:
   THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
                              enum thr_lock_type lock_type);
   bool is_crashed() const;
+  int check_for_upgrade(HA_CHECK_OPT *check_opt);
   int check(THD* thd, HA_CHECK_OPT* check_opt);
   bool check_and_repair(THD *thd);
   uint32 max_row_length(const uchar *buf);

No bundle (reason: useless for push emails).
Thread
bzr push into mysql-trunk branch (sergey.vojtovich:3476 to 3477) — Sergey Vojtovich, 7 Oct