List: Commits
From: Olav Sandstaa  Date: June 6 2012 12:31pm
Subject: bzr push into mysql-trunk branch (olav.sandstaa:3917 to 3918)
 3918 Olav Sandstaa	2012-06-06 [merge]
      Merge from mysql-trunk to mysql-trunk-wl6082

    modified:
      .bzrignore
      mysql-test/suite/engines/iuds/r/update_year.result
      mysql-test/suite/engines/iuds/t/update_year.test
      sql/handler.cc
      sql/sql_insert.cc
      sql/sql_tmp_table.cc
      storage/innobase/row/row0import.cc
      storage/innobase/row/row0mysql.cc
 3917 Olav Sandstaa	2012-06-06
      WL#6082 Improve the Disk-Sweep Multi-Range Read cost model
      
      Patch 5: IO cost estimate is too high compared to "standard MRR"
      
      The DS-MRR cost model has two different ways of calculating the
      IO-cost of doing a "disk sweep scan" (both are sketched in code
      after the list):
      
      a. Disk-sweep model: used when the data is not stored in a
         clustered index. The model estimates the number of blocks
         that will be accessed and models the time to read them as a
         sequential scan of the data file. This model is used for
         MyISAM.
      
      b. Clustered-index model: used when the data is stored in a
         clustered index. The cost model estimates the sweep as one
         random block access per record. This does not take into
         account that we will actually be accessing the table as a
         scan. This model is used for InnoDB.
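
      The difference can be illustrated with a small sketch
      (hypothetical code with made-up names and signature, not the
      actual handler.cc implementation):

        /* Sketch: IO-cost estimate for a disk sweep that reads
           'records' rows stored in 'blocks' data blocks, where
           avg_io_cost is the cost of one random block read. */
        static double dsmrr_io_cost(bool clustered_index, double records,
                                    double blocks, double avg_io_cost)
        {
          if (!clustered_index)
            return blocks * avg_io_cost;   /* a. sequential disk sweep */
          return records * avg_io_cost;    /* b. one random IO per row */
        }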
      
      The focus is on improving the model for InnoDB, since it does
      not take into account any benefit from sequential access to the
      table. Due to this, the IO-cost estimate will in almost all
      cases be larger for DS-MRR than for "standard MRR" (and thus,
      cost-based DS-MRR will not be used for InnoDB).
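
      As a made-up numeric example: sweeping 1000 rows packed into 100
      blocks is costed by model b. as 1000 random block reads, roughly
      the same IO estimate as standard MRR's per-record access, so
      DS-MRR's extra sorting overhead makes it lose; model a. would
      cost the same sweep as about 100 block reads.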
      
      The fix for this problem is to switch to the "disk streaming IO
      cost model" (as described in a. above) also when the data is
      stored in a clustered index.
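
      In terms of the sketch above, the clustered-index branch goes
      away and both cases use the sweep estimate (again hypothetical
      code):

        static double dsmrr_io_cost(double blocks, double avg_io_cost)
        {
          /* Disk-sweep model used whether or not the data is stored
             in a clustered index */
          return blocks * avg_io_cost;
        }
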
     @ mysql-test/suite/opt_trace/r/bugs_no_prot_all.result
        WL#6082: Change in cost estimate due to the reduced IO-cost for DS-MRR.
     @ mysql-test/suite/opt_trace/r/bugs_ps_prot_all.result
        WL#6082: Change in cost estimate due to the reduced IO-cost for DS-MRR.
     @ sql/handler.cc
        WL#6082: Change how the DS-MRR cost model calculates the
        IO-cost of a disk sweep when the data is stored in a
        clustered index.

    modified:
      mysql-test/suite/opt_trace/r/bugs_no_prot_all.result
      mysql-test/suite/opt_trace/r/bugs_ps_prot_all.result
      sql/handler.cc
=== modified file '.bzrignore'
--- a/.bzrignore	2012-05-30 11:16:45 +0000
+++ b/.bzrignore	2012-06-06 10:10:17 +0000
@@ -384,6 +384,7 @@ client/my_decimal.h
 client/my_user.c
 client/mysql
 client/mysql.cpp
+client/mysql_config_editor
 client/mysql_upgrade
 client/mysqladmin
 client/mysqladmin.c

=== modified file 'mysql-test/suite/engines/iuds/r/update_year.result'
--- a/mysql-test/suite/engines/iuds/r/update_year.result	2012-05-24 22:21:16 +0000
+++ b/mysql-test/suite/engines/iuds/r/update_year.result	2012-06-06 10:04:10 +0000
@@ -326,7 +326,7 @@ c2
 1975
 UPDATE t3 SET c2=c2 + 1902 WHERE c2='75';
 Warnings:
-Warning	1264	Out of range value for column 'c2' at row 17
+Warning	1264	Out of range value for column 'c2' at row XX
 SELECT c2 FROM t3;
 c2
 0000

=== modified file 'mysql-test/suite/engines/iuds/t/update_year.test'
--- a/mysql-test/suite/engines/iuds/t/update_year.test	2012-05-24 22:21:16 +0000
+++ b/mysql-test/suite/engines/iuds/t/update_year.test	2012-06-06 10:04:10 +0000
@@ -80,6 +80,7 @@ UPDATE t4 SET c1=c1 + 04 WHERE c2=04;
 SELECT c1 FROM t4;
 --sorted_result
 SELECT c2 FROM t3 WHERE c2=75;
+--replace_regex /(Out of range value for column 'c2' at row) [0-9]+/\1 XX/
 UPDATE t3 SET c2=c2 + 1902 WHERE c2='75';
 --sorted_result
 SELECT c2 FROM t3;

=== modified file 'sql/handler.cc'
--- a/sql/handler.cc	2012-06-06 11:44:43 +0000
+++ b/sql/handler.cc	2012-06-06 12:30:55 +0000
@@ -3351,10 +3351,8 @@ void handler::get_auto_increment(ulonglo
 
 void handler::ha_release_auto_increment()
 {
-#ifdef BUG60114_IS_FIXED
   DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
               m_lock_type != F_UNLCK);
-#endif
   release_auto_increment();
   insert_id_for_cur_row= 0;
   auto_inc_interval_for_cur_row.replace(0, 0, 0);

=== modified file 'sql/sql_insert.cc'
--- a/sql/sql_insert.cc	2012-05-28 11:41:46 +0000
+++ b/sql/sql_insert.cc	2012-06-06 11:13:22 +0000
@@ -1097,8 +1097,6 @@ exit_without_my_ok:
   if (lock_type == TL_WRITE_DELAYED)
     end_delayed_insert(thd);
 #endif
-  if (table != NULL)
-    table->file->ha_release_auto_increment();
   if (!joins_freed)
     free_underlaid_joins(thd, &thd->lex->select_lex);
   thd->abort_on_warning= 0;

=== modified file 'sql/sql_tmp_table.cc'
--- a/sql/sql_tmp_table.cc	2012-05-22 07:02:35 +0000
+++ b/sql/sql_tmp_table.cc	2012-06-06 09:42:30 +0000
@@ -21,6 +21,7 @@
 #include "sql_base.h"
 #include "opt_trace.h"
 #include "debug_sync.h"
+#include "filesort.h"   // filesort_free_buffers
 
 #include <algorithm>
 using std::max;
@@ -1826,6 +1827,8 @@ free_tmp_table(THD *thd, TABLE *entry)
   // Release latches since this can take a long time
   ha_release_temporary_latches(thd);
 
+  filesort_free_buffers(entry, true);
+
   if (entry->file && entry->created)
   {
     if (entry->db_stat)

=== modified file 'storage/innobase/row/row0import.cc'
--- a/storage/innobase/row/row0import.cc	2012-05-23 01:02:07 +0000
+++ b/storage/innobase/row/row0import.cc	2012-06-06 11:04:54 +0000
@@ -1682,21 +1682,17 @@ PageConverter::operator() (
 		page (ie. the block->frame). Therefore the caller should write
 		out the descriptor contents and not block->frame for compressed
 		pages. */
-		
-		buf_flush_init_for_writing(
-			page_type == FIL_PAGE_INDEX || !is_compressed_table()
-			? block->frame : block->page.zip.data,
-			page_type != FIL_PAGE_INDEX || !is_compressed_table()
-			? 0 : m_page_zip_ptr,
-			m_current_lsn);
-
-		/* Calculate and update the checksum of non-btree pages for
-		compressed tables explicitly here. It was not done in the 
-		function buf_flush_init_for_writing() because we deliberately
-		passed in a NULL Zip descriptor to avoid copying data around
-		unnecessarily. For large tables this is a HUGE cost. */
 
-		if (is_compressed_table() && page_type != FIL_PAGE_INDEX) {
+		if (!is_compressed_table() || page_type == FIL_PAGE_INDEX) {
+
+			buf_flush_init_for_writing(
+				!is_compressed_table()
+				? block->frame : block->page.zip.data,
+				!is_compressed_table() ? 0 : m_page_zip_ptr,
+				m_current_lsn);
+		} else {
+			/* Calculate and update the checksum of non-btree
+			pages for compressed tables explicitly here. */
 
 			buf_flush_update_zip_checksum(
 				get_frame(block), get_zip_size(),

=== modified file 'storage/innobase/row/row0mysql.cc'
--- a/storage/innobase/row/row0mysql.cc	2012-06-06 08:05:04 +0000
+++ b/storage/innobase/row/row0mysql.cc	2012-06-06 11:18:12 +0000
@@ -963,12 +963,16 @@ row_get_prebuilt_insert_row(
 
 	ut_ad(prebuilt && table && prebuilt->trx);
 
-	/* Check if a new index has been added that prebuilt doesn't know
-	about. We need to rebuild the query graph. */
+	/* Check if an index has been dropped or a new index has been
+	added that prebuilt does not know about. We may need to rebuild
+	the row insert template. */
 
 	if (prebuilt->ins_node != 0) {
 
-		if (prebuilt->trx_id >= last_index->trx_id) {
+		if (prebuilt->trx_id >= last_index->trx_id
+		    && UT_LIST_GET_LEN(prebuilt->ins_node->entry_list)
+		    == UT_LIST_GET_LEN(table->indexes)) {
+
 			return(prebuilt->ins_node->row);
 		}
 

No bundle (reason: useless for push emails).