List: Commits
From: Davi Arnaut
Date: July 23 2010 12:04pm
Subject: bzr commit into mysql-trunk-bugfixing branch (davi:3133) Bug#45377
# At a local mysql-trunk-bugfixing repository of davi

 3133 Davi Arnaut	2010-07-23
      Bug#45377: ARCHIVE tables aren't discoverable after OPTIMIZE
      
      The problem was that the optimize method of the ARCHIVE storage
      engine did not preserve the FRM embedded in the ARZ file when it
      rewrote the ARZ file during optimization. The ARCHIVE engine
      embeds a copy of the FRM in the ARZ file so that the table can be
      moved from machine to machine without also copying the FRM file
      -- the engine restores the embedded FRM during table discovery.
      
      The solution is to copy the embedded FRM into the new ARZ file
      when it is rewritten. In addition, error checking is added to the
      relevant I/O operations so that garbage is not copied over when a
      read or write fails.
     @ mysql-test/t/archive.test
        Add test case for Bug#45377.
     @ storage/archive/azio.c
        Add error checking so that failed I/O operations are detected
        instead of being silently ignored.
     @ storage/archive/ha_archive.cc
        Copy over the embedded FRM.

    modified:
      mysql-test/r/archive.result
      mysql-test/t/archive.test
      storage/archive/azio.c
      storage/archive/ha_archive.cc
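
For illustration only, here is a standalone sketch of the idea behind the
fix, written against plain POSIX pread/pwrite rather than the azio/mysys
helpers used in the patch; the offsets, lengths, and file names below are
made up:

/*
  Illustration only: copy an embedded metadata blob (the "FRM") from the
  old data file into its rewritten replacement, which is what optimize
  must do for the ARZ file.  Plain POSIX I/O, hypothetical layout.
*/
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define FRM_START_POS 512   /* made up; the real offset comes from the ARZ header */
#define FRM_LENGTH    1024  /* made up; stored in the ARZ header as frm_length */

static int copy_embedded_frm(int old_fd, int new_fd)
{
  char *frm= malloc(FRM_LENGTH);
  if (!frm)
    return 1;                              /* out of memory */

  /* Fail on short reads/writes, like my_pread/my_pwrite with MY_NABP. */
  if (pread(old_fd, frm, FRM_LENGTH, FRM_START_POS) != FRM_LENGTH ||
      pwrite(new_fd, frm, FRM_LENGTH, FRM_START_POS) != FRM_LENGTH)
  {
    free(frm);
    return 1;
  }

  free(frm);
  return 0;
}

int main(void)
{
  int old_fd= open("t1_old.ARZ", O_RDONLY);
  int new_fd= open("t1_new.ARZ", O_CREAT | O_RDWR, 0644);

  if (old_fd < 0 || new_fd < 0)
    return 1;

  int rc= copy_embedded_frm(old_fd, new_fd);
  printf("frm copy %s\n", rc ? "failed" : "succeeded");

  close(old_fd);
  close(new_fd);
  return rc;
}

The actual patch does the equivalent with azread_frm()/azwrite_frm() and
my_malloc(), as shown in the ha_archive.cc hunk further down.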
=== modified file 'mysql-test/r/archive.result'
--- a/mysql-test/r/archive.result	2010-06-06 11:19:29 +0000
+++ b/mysql-test/r/archive.result	2010-07-23 12:04:22 +0000
@@ -12775,3 +12775,29 @@ a
 1
 2
 DROP TABLE t1;
+#
+# Bug#45377: ARCHIVE tables aren't discoverable after OPTIMIZE
+#
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (a int) ENGINE=ARCHIVE;
+SHOW CREATE TABLE t1;
+Table	Create Table
+t1	CREATE TABLE `t1` (
+  `a` int(11) DEFAULT NULL
+) ENGINE=ARCHIVE DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES (1);
+OPTIMIZE TABLE t1;
+Table	Op	Msg_type	Msg_text
+test.t1	optimize	status	OK
+FLUSH TABLES;
+INSERT INTO t1 VALUES (2);
+SELECT * FROM t1 ORDER BY a;
+a
+1
+2
+SHOW CREATE TABLE t1;
+Table	Create Table
+t1	CREATE TABLE `t1` (
+  `a` int(11) DEFAULT NULL
+) ENGINE=ARCHIVE DEFAULT CHARSET=latin1
+DROP TABLE t1;

=== modified file 'mysql-test/t/archive.test'
--- a/mysql-test/t/archive.test	2010-06-06 11:19:29 +0000
+++ b/mysql-test/t/archive.test	2010-07-23 12:04:22 +0000
@@ -1701,3 +1701,24 @@ SELECT * FROM t1;
 REPAIR TABLE t1 EXTENDED;
 SELECT * FROM t1;
 DROP TABLE t1;
+
+
+--echo #
+--echo # Bug#45377: ARCHIVE tables aren't discoverable after OPTIMIZE
+--echo #
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (a int) ENGINE=ARCHIVE;
+SHOW CREATE TABLE t1;
+INSERT INTO t1 VALUES (1);
+OPTIMIZE TABLE t1;
+let $MYSQLD_DATADIR= `select @@datadir`;
+remove_file $MYSQLD_DATADIR/test/t1.frm;
+FLUSH TABLES;
+INSERT INTO t1 VALUES (2);
+SELECT * FROM t1 ORDER BY a;
+SHOW CREATE TABLE t1;
+DROP TABLE t1;

=== modified file 'storage/archive/azio.c'
--- a/storage/archive/azio.c	2009-11-17 12:14:27 +0000
+++ b/storage/archive/azio.c	2010-07-23 12:04:22 +0000
@@ -31,7 +31,7 @@ int az_open(azio_stream *s, const char *
 int do_flush(azio_stream *file, int flush);
 int    get_byte(azio_stream *s);
 void   check_header(azio_stream *s);
-void write_header(azio_stream *s);
+int write_header(azio_stream *s);
 int    destroy(azio_stream *s);
 void putLong(File file, uLong x);
 uLong  getLong(azio_stream *s);
@@ -155,7 +155,7 @@ int az_open (azio_stream *s, const char
 }
 
 
-void write_header(azio_stream *s)
+int write_header(azio_stream *s)
 {
   char buffer[AZHEADER_SIZE + AZMETA_BUFFER_SIZE];
   char *ptr= buffer;
@@ -191,8 +191,8 @@ void write_header(azio_stream *s)
   *(ptr + AZ_DIRTY_POS)= (unsigned char)s->dirty; /* Start of Data Block Index Block */
 
   /* Always begin at the begining, and end there as well */
-  my_pwrite(s->file, (uchar*) buffer, AZHEADER_SIZE + AZMETA_BUFFER_SIZE, 0,
-            MYF(0));
+  return my_pwrite(s->file, (uchar*) buffer, AZHEADER_SIZE + AZMETA_BUFFER_SIZE,
+                   0, MYF(MY_NABP)) ? 1 : 0;
 }
 
 /* ===========================================================================
@@ -838,19 +838,19 @@ int azwrite_frm(azio_stream *s, char *bl
   s->frm_length= length;
   s->start+= length;
 
-  my_pwrite(s->file, (uchar*) blob, s->frm_length, s->frm_start_pos, MYF(0));
-
-  write_header(s);
-  my_seek(s->file, 0, MY_SEEK_END, MYF(0));
+  if (my_pwrite(s->file, (uchar*) blob, s->frm_length,
+                s->frm_start_pos, MYF(MY_NABP)) ||
+      write_header(s) ||
+      (my_seek(s->file, 0, MY_SEEK_END, MYF(0)) == MY_FILEPOS_ERROR))
+    return 1;
 
   return 0;
 }
 
 int azread_frm(azio_stream *s, char *blob)
 {
-  my_pread(s->file, (uchar*) blob, s->frm_length, s->frm_start_pos, MYF(0));
-
-  return 0;
+  return my_pread(s->file, (uchar*) blob, s->frm_length,
+                  s->frm_start_pos, MYF(MY_NABP)) ? 1 : 0;
 }
 
 

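The azio.c hunks above turn write_header() and the frm read/write helpers
from fire-and-forget calls into int-returning functions, and pass
MYF(MY_NABP) so that a partial read or write counts as a failure; callers
can then chain the steps with || and stop at the first error. Below is a
minimal standalone sketch of that shape using POSIX calls instead of the
mysys wrappers; the header size and file name are made up:

/*
  Illustration only: make each I/O step return 0 on success and non-zero
  on failure (including short writes), so a caller can chain them with ||
  the way the new azwrite_frm() does.
*/
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define HEADER_SIZE 26   /* stand-in for AZHEADER_SIZE + AZMETA_BUFFER_SIZE */

/* Returns 0 on success, 1 if the header was not written in full. */
static int write_header(int fd)
{
  char buffer[HEADER_SIZE];
  memset(buffer, 0, sizeof(buffer));
  return pwrite(fd, buffer, sizeof(buffer), 0) != sizeof(buffer);
}

static int write_frm(int fd, const char *blob, size_t length, off_t pos)
{
  /* Any failing step short-circuits the rest, as in azwrite_frm(). */
  if (pwrite(fd, blob, length, pos) != (ssize_t) length ||
      write_header(fd) ||
      lseek(fd, 0, SEEK_END) == (off_t) -1)
    return 1;
  return 0;
}

int main(void)
{
  int fd= open("demo.ARZ", O_CREAT | O_RDWR, 0644);
  if (fd < 0)
    return 1;

  int rc= write_frm(fd, "frm-blob", 8, HEADER_SIZE);
  printf("write_frm %s\n", rc ? "failed" : "succeeded");

  close(fd);
  return rc;
}
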
=== modified file 'storage/archive/ha_archive.cc'
--- a/storage/archive/ha_archive.cc	2010-07-08 21:20:08 +0000
+++ b/storage/archive/ha_archive.cc	2010-07-23 12:04:22 +0000
@@ -1345,10 +1345,11 @@ int ha_archive::repair(THD* thd, HA_CHEC
 */
 int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
 {
-  DBUG_ENTER("ha_archive::optimize");
   int rc= 0;
+  char *frm_ptr= NULL;
   azio_stream writer;
   char writer_filename[FN_REFLEN];
+  DBUG_ENTER("ha_archive::optimize");
 
   init_archive_reader();
 
@@ -1366,6 +1367,26 @@ int ha_archive::optimize(THD* thd, HA_CH
   if (!(azopen(&writer, writer_filename, O_CREAT|O_RDWR|O_BINARY)))
     DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); 
 
+  /* Transfer the embedded FRM so that the file can be discoverable. */
+  if (!(frm_ptr= (char *)my_malloc(archive.frm_length, MYF(0))))
+  {
+    rc= HA_ERR_OUT_OF_MEM;
+    goto error;
+  }
+
+  my_errno= 0;
+
+  /*
+    Read file offset is repositioned when writing a new header.
+    Write file offset is set to the end of the file.
+  */
+  if (azread_frm(&archive, frm_ptr) ||
+      azwrite_frm(&writer, frm_ptr, archive.frm_length))
+  {
+    rc= my_errno ? my_errno : HA_ERR_INTERNAL_ERROR;
+    goto error;
+  }
+
   /* 
     An extended rebuild is a lot more effort. We open up each row and re-record it. 
     Any dead rows are removed (aka rows that may have been partially recorded). 
@@ -1442,11 +1463,13 @@ int ha_archive::optimize(THD* thd, HA_CH
   // make the file we just wrote be our data file
   rc= my_rename(writer_filename, share->data_file_name, MYF(0));
 
+  my_free(frm_ptr);
 
   DBUG_RETURN(rc);
 error:
   DBUG_PRINT("ha_archive", ("Failed to recover, error was %d", rc));
   azclose(&writer);
+  my_free(frm_ptr);
 
   DBUG_RETURN(rc); 
 }
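
The ha_archive.cc hunk above follows a common single-exit cleanup shape:
allocate the FRM buffer first, jump to the shared error label on any
failure, and free the buffer on both the success and the error path (the
real code also closes the writer stream on the error path). A standalone
sketch of that shape, with malloc/free standing in for my_malloc/my_free
and dummy I/O steps:

/*
  Illustration only: allocate, goto a shared error label on failure, and
  free on both exit paths -- the shape used by the optimize() change above.
*/
#include <stdio.h>
#include <stdlib.h>

#define ERR_OUT_OF_MEM 1
#define ERR_INTERNAL   2

static int read_frm(char *buf)  { (void) buf; return 0; }  /* pretend I/O */
static int write_frm(char *buf) { (void) buf; return 0; }  /* pretend I/O */

static int optimize_like(size_t frm_length)
{
  int rc= 0;
  char *frm_ptr= NULL;

  if (!(frm_ptr= malloc(frm_length)))
  {
    rc= ERR_OUT_OF_MEM;
    goto error;
  }

  if (read_frm(frm_ptr) || write_frm(frm_ptr))
  {
    rc= ERR_INTERNAL;
    goto error;
  }

  free(frm_ptr);                 /* success path */
  return rc;

error:
  free(frm_ptr);                 /* free(NULL) is a no-op, so this is safe */
  return rc;
}

int main(void)
{
  printf("rc= %d\n", optimize_like(1024));
  return 0;
}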


Attachment: [text/bzr-bundle] bzr/davi.arnaut@oracle.com-20100723120422-hncyay93gdwn1ve1.bundle