MySQL Lists are EOL. Please join:

List: Commits  « Previous Message | Next Message »
From:antony Date:August 1 2006 4:36pm
Subject:bk commit into 5.0 tree (acurtis:1.2240) BUG#15669
View as plain text  
Below is the list of changes that have just been committed into a local
5.0 repository of antony. When antony does a push these changes will
be propagated to the main repository and, within 24 hours after the
push, to the public repository.
For information on how to access the public repository
see http://dev.mysql.com/doc/mysql/en/installing-source-tree.html

ChangeSet@stripped, 2006-08-01 09:36:34-07:00, acurtis@stripped +1 -0
  Bug#15669
    "Test case 'csv' produces incorrect result on OpenBSD"
    mmapped pages were not being invalidated when writes occurred to the
    file via a fd i/o operation.
    Force explicit invalidation rather than relying on implicit invalidation.

  sql/examples/ha_tina.cc@stripped, 2006-08-01 09:36:30-07:00, acurtis@stripped +56 -8
    Bug#15669
      Make sure to invalidate in memory pages when the file has been 
      altered by fd i/o.
      This is important to some operating systems, such as OpenBSD.

# This is a BitKeeper patch.  What follows are the unified diffs for the
# set of deltas contained in the patch.  The rest of the patch, the part
# that BitKeeper cares about, is below these diffs.
# User:	acurtis
# Host:	ltantony.xiphis.org
# Root:	/home/antony/work2/p2-bug15669.5

--- 1.22/sql/examples/ha_tina.cc	2006-08-01 09:36:48 -07:00
+++ 1.23/sql/examples/ha_tina.cc	2006-08-01 09:36:48 -07:00
@@ -101,13 +101,34 @@
   return (byte*) share->table_name;
 }
 
+
+int free_mmap(TINA_SHARE *share)
+{
+  DBUG_ENTER("ha_tina::free_mmap");
+  if (share->mapped_file)
+  {
+    /*
+      Invalidate the mapped in pages. Some operating systems (eg OpenBSD)
+      would reuse already cached pages even if the file has been altered
+      using fd based I/O. This may be optimized by perhaps only invalidating
+      the last page but optimization of deprecated code is not important.
+    */
+    msync(share->mapped_file, 0, MS_INVALIDATE);
+    if (munmap(share->mapped_file, share->file_stat.st_size))
+      DBUG_RETURN(1);
+  }
+  share->mapped_file= NULL;
+  DBUG_RETURN(0);
+}
+
 /*
   Reloads the mmap file.
 */
 int get_mmap(TINA_SHARE *share, int write)
 {
   DBUG_ENTER("ha_tina::get_mmap");
-  if (share->mapped_file && munmap(share->mapped_file, share->file_stat.st_size))
+  
+  if (free_mmap(share))
     DBUG_RETURN(1);
 
   if (my_fstat(share->data_file, &share->file_stat, MYF(MY_WME)) == -1)
@@ -230,8 +251,7 @@
   int result_code= 0;
   if (!--share->use_count){
     /* Drop the mapped file */
-    if (share->mapped_file) 
-      munmap(share->mapped_file, share->file_stat.st_size);
+    free_mmap(share);
     result_code= my_close(share->data_file,MYF(0));
     hash_delete(&tina_open_tables, (byte*) share);
     thr_lock_delete(&share->lock);
@@ -493,6 +513,13 @@
 
   size= encode_quote(buf);
 
+  /*
+    we are going to alter the file so we must invalidate the in memory pages
+    otherwise we risk a race between the in memory pages and the disk pages.
+  */
+  if (free_mmap(share))
+    DBUG_RETURN(-1);
+
   if (my_write(share->data_file, buffer.ptr(), size, MYF(MY_WME | MY_NABP)))
     DBUG_RETURN(-1);
 
@@ -534,8 +561,26 @@
   if (chain_append())
     DBUG_RETURN(-1);
 
+  /*
+    we are going to alter the file so we must invalidate the in memory pages
+    otherwise we risk a race between the in memory pages and the disk pages.
+  */
+  if (free_mmap(share))
+    DBUG_RETURN(-1);
+
   if (my_write(share->data_file, buffer.ptr(), size, MYF(MY_WME | MY_NABP)))
     DBUG_RETURN(-1);
+
+  /* 
+    Ok, this means that we will be doing potentially bad things 
+    during a bulk update on some OS'es. Ideally, we should extend the length
+    of the file, redo the mmap and then write all the updated rows. Upon
+    finishing the bulk update, truncate the file length to the final length.
+    Since this code is all being deprecated, there is no point in optimizing it now.
+  */
+  if (get_mmap(share, 0) > 0) 
+    DBUG_RETURN(-1);
+
   DBUG_RETURN(0);
 }
 
@@ -812,15 +857,14 @@
       length= length - (size_t)(ptr->end - ptr->begin);
     }
 
-    /* Truncate the file to the new size */
-    if (my_chsize(share->data_file, length, 0, MYF(MY_WME)))
+    /* Invalidate all cached mmap pages */
+    if (free_mmap(share))
       DBUG_RETURN(-1);
 
-    if (munmap(share->mapped_file, length))
+    /* Truncate the file to the new size */
+    if (my_chsize(share->data_file, length, 0, MYF(MY_WME)))
       DBUG_RETURN(-1);
 
-    /* We set it to null so that get_mmap() won't try to unmap it */
-    share->mapped_file= NULL;
     if (get_mmap(share, 0) > 0) 
       DBUG_RETURN(-1);
   }
@@ -837,6 +881,10 @@
 
   if (!records_is_known)
     return (my_errno=HA_ERR_WRONG_COMMAND);
+
+  /* Invalidate all cached mmap pages */
+  if (free_mmap(share)) 
+    DBUG_RETURN(-1);
 
   int rc= my_chsize(share->data_file, 0, 0, MYF(MY_WME));
 
Thread
bk commit into 5.0 tree (acurtis:1.2240) BUG#15669antony1 Aug