List:Commits« Previous MessageNext Message »
From:magnus.blaudd Date:May 11 2012 12:18pm
Subject:bzr push into mysql-trunk branch (magnus.blaudd:3815 to 3816)
View as plain text  
 3816 magnus.blaudd@stripped	2012-05-11 [merge]
      Merge trunk-wl5906 -> trunk

    modified:
      sql/ha_ndbcluster.cc
      sql/ha_ndbcluster.h
      sql/handler.h
      sql/sql_delete.cc
      sql/sql_update.cc
      sql/table.cc
      sql/table.h
 3815 Jorgen Loland	2012-05-11
      Reenable index_merge_innodb tests disabled by BUG#45727. 
      
      The test is marked as experimental since it is expected that 
      it will fail due to varying row estimates in EXPLAIN. It will
      be removed from the experimental list once it has stabilized
      in PB
     @ mysql-test/collections/default.experimental
        An attempt at reenabling

    modified:
      mysql-test/collections/default.experimental
      mysql-test/include/index_merge1.inc
      mysql-test/include/index_merge2.inc
      mysql-test/include/index_merge_ror.inc
      mysql-test/r/index_merge_innodb.result
      mysql-test/t/index_merge_innodb.test
=== modified file 'sql/ha_ndbcluster.cc'
--- a/sql/ha_ndbcluster.cc	2012-03-28 11:25:37 +0000
+++ b/sql/ha_ndbcluster.cc	2012-04-25 12:30:33 +0000
@@ -7138,10 +7138,10 @@ int ha_ndbcluster::extra(enum ha_extra_f
 }
 
 
-bool ha_ndbcluster::read_before_write_removal_possible()
+bool ha_ndbcluster::start_read_removal()
 {
   THD *thd= table->in_use;
-  DBUG_ENTER("read_before_write_removal_possible");
+  DBUG_ENTER("start_read_removal");
 
   if (uses_blob_value(table->write_set))
   {
@@ -7188,9 +7188,10 @@ bool ha_ndbcluster::read_before_write_re
 }
 
 
-ha_rows ha_ndbcluster::read_before_write_removal_rows_written(void) const
+ha_rows ha_ndbcluster::end_read_removal(void)
 {
-  DBUG_ENTER("read_before_write_removal_rows_written");
+  DBUG_ENTER("end_read_removal");
+  DBUG_ASSERT(m_read_before_write_removal_possible);
   DBUG_PRINT("info", ("updated: %llu, deleted: %llu",
                       m_rows_updated, m_rows_deleted));
   DBUG_RETURN(m_rows_updated + m_rows_deleted);

=== modified file 'sql/ha_ndbcluster.h'
--- a/sql/ha_ndbcluster.h	2012-03-28 11:25:37 +0000
+++ b/sql/ha_ndbcluster.h	2012-04-25 12:30:33 +0000
@@ -305,9 +305,8 @@ class ha_ndbcluster: public handler
 #endif
   void get_dynamic_partition_info(PARTITION_STATS *stat_info, uint part_id);
   uint32 calculate_key_hash_value(Field **field_array);
-  bool read_before_write_removal_supported() const { return true; }
-  bool read_before_write_removal_possible();
-  ha_rows read_before_write_removal_rows_written(void) const;
+  bool start_read_removal(void);
+  ha_rows end_read_removal(void);
   int extra(enum ha_extra_function operation);
   int extra_opt(enum ha_extra_function operation, ulong cache_size);
   int reset();

=== modified file 'sql/handler.h'
--- a/sql/handler.h	2012-05-11 08:04:48 +0000
+++ b/sql/handler.h	2012-05-11 12:05:39 +0000
@@ -184,8 +184,37 @@ enum enum_alter_inplace_result {
  */
 #define HA_BINLOG_FLAGS (HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE)
 
-/*
+/**
   The handler supports read before write removal optimization
+
+  Read before write removal may be used for storage engines which support
+  write without previous read of the row to be updated. Handler returning
+  this flag must implement start_read_removal() and end_read_removal().
+  The handler may return "fake" rows constructed from the key of the row
+  asked for. This is used to optimize UPDATE and DELETE by reducing the
+  number of roundtrips between handler and storage engine.
+  
+  Example:
+  UPDATE a=1 WHERE pk IN (<keys>)
+
+  mysql_update()
+  {
+    if (<conditions for starting read removal>)
+      start_read_removal()
+      -> handler returns true if read removal supported for this table/query
+
+    while(read_record("pk=<key>"))
+      -> handler returns fake row with column "pk" set to <key>
+
+      ha_update_row()
+      -> handler sends write "a=1" for row with "pk=<key>"
+
+    end_read_removal()
+    -> handler returns the number of rows actually written
+  }
+
+  @note This optimization in combination with batching may be used to
+        remove even more roundtrips.
 */
 #define HA_READ_BEFORE_WRITE_REMOVAL  (LL(1) << 38)
 
@@ -2139,6 +2168,21 @@ public:
   { return extra(operation); }
 
   /**
+    Start read (before write) removal on the current table.
+    @see HA_READ_BEFORE_WRITE_REMOVAL
+  */
+  virtual bool start_read_removal(void)
+  { DBUG_ASSERT(0); return false; }
+
+  /**
+    End read (before write) removal and return the number of rows
+    really written
+    @see HA_READ_BEFORE_WRITE_REMOVAL
+  */
+  virtual ha_rows end_read_removal(void)
+  { DBUG_ASSERT(0); return (ha_rows) 0; }
+
+  /**
     In an UPDATE or DELETE, if the row under the cursor was locked by another
     transaction, and the engine used an optimistic read of the last
     committed row value under the cursor, then the engine returns 1 from this

=== modified file 'sql/sql_delete.cc'
--- a/sql/sql_delete.cc	2012-03-06 14:29:42 +0000
+++ b/sql/sql_delete.cc	2012-04-25 12:30:33 +0000
@@ -57,6 +57,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *
   bool          const_cond_result;
   ha_rows	deleted= 0;
   bool          reverse= FALSE;
+  bool          read_removal= false;
   bool          skip_record;
   bool          need_sort= FALSE;
   bool          err= true;
@@ -342,9 +343,13 @@ bool mysql_delete(THD *thd, TABLE_LIST *
   else
     will_batch= !table->file->start_bulk_delete();
 
-
   table->mark_columns_needed_for_delete();
 
+  if ((table->file->ha_table_flags() & HA_READ_BEFORE_WRITE_REMOVAL) &&
+      !using_limit &&
+      select && select->quick && select->quick->index != MAX_KEY)
+    read_removal= table->check_read_removal(select->quick->index);
+
   while (!(error=info.read_record(&info)) && !thd->killed &&
 	 ! thd->is_error())
   {
@@ -404,6 +409,11 @@ bool mysql_delete(THD *thd, TABLE_LIST *
       table->file->print_error(loc_error,MYF(0));
     error=1;
   }
+  if (read_removal)
+  {
+    /* Only handler knows how many records were really written */
+    deleted= table->file->end_read_removal();
+  }
   THD_STAGE_INFO(thd, stage_end);
   end_read_record(&info);
   if (options & OPTION_QUICK)

=== modified file 'sql/sql_update.cc'
--- a/sql/sql_update.cc	2012-05-10 08:49:23 +0000
+++ b/sql/sql_update.cc	2012-05-11 12:05:39 +0000
@@ -163,6 +163,34 @@ static bool check_fields(THD *thd, List<
 
 
 /**
+  Check if all expressions in list are constant expressions
+
+  @param[in] values List of expressions
+
+  @retval true Only constant expressions
+  @retval false At least one non-constant expression
+*/
+
+static bool check_constant_expressions(List<Item> &values)
+{
+  Item *value;
+  List_iterator_fast<Item> v(values);
+  DBUG_ENTER("check_constant_expressions");
+
+  while ((value= v++))
+  {
+    if (!value->const_item())
+    {
+      DBUG_PRINT("exit", ("expression is not constant"));
+      DBUG_RETURN(false);
+    }
+  }
+  DBUG_PRINT("exit", ("expression is constant"));
+  DBUG_RETURN(true);
+}
+
+
+/**
   Re-read record if more columns are needed for error message.
 
   If we got a duplicate key error, we want to write an error
@@ -272,6 +300,7 @@ int mysql_update(THD *thd,
   bool          need_sort= TRUE;
   bool          reverse= FALSE;
   bool          using_filesort;
+  bool          read_removal= false;
 #ifndef NO_EMBEDDED_ACCESS_CHECKS
   uint		want_privilege;
 #endif
@@ -668,6 +697,12 @@ int mysql_update(THD *thd,
   else
     will_batch= !table->file->start_bulk_update();
 
+  if ((table->file->ha_table_flags() & HA_READ_BEFORE_WRITE_REMOVAL) &&
+      !ignore && !using_limit &&
+      select && select->quick && select->quick->index != MAX_KEY &&
+      check_constant_expressions(values))
+    read_removal= table->check_read_removal(select->quick->index);
+
   // For prepare_record_for_error_message():
   if (table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ)
     table->prepare_for_position();
@@ -879,6 +914,14 @@ int mysql_update(THD *thd,
     table->file->end_bulk_update();
   table->file->try_semi_consistent_read(0);
 
+  if (read_removal)
+  {
+    /* Only handler knows how many records really were written */
+    updated= table->file->end_read_removal();
+    if (!records_are_comparable(table))
+      found= updated;
+  }
+
   if (!transactional_table && updated > 0)
     thd->transaction.stmt.mark_modified_non_trans_table();
 

=== modified file 'sql/table.cc'
--- a/sql/table.cc	2012-05-11 08:04:48 +0000
+++ b/sql/table.cc	2012-05-11 12:05:39 +0000
@@ -6024,6 +6024,39 @@ Field *TABLE::get_timestamp_field()
 
 
 /**
+  Read removal is possible if the selected quick read
+  method is using full unique index
+
+  @see HA_READ_BEFORE_WRITE_REMOVAL
+
+  @param index              Number of the index used for read
+
+  @retval true   success, read removal started
+  @retval false  read removal not started
+*/
+
+bool TABLE::check_read_removal(uint index)
+{
+  DBUG_ENTER("check_read_removal");
+  DBUG_ASSERT(file->ha_table_flags() & HA_READ_BEFORE_WRITE_REMOVAL);
+  DBUG_ASSERT(index != MAX_KEY);
+
+  // Index must be unique
+  if ((key_info[index].flags & HA_NOSAME) == 0)
+    DBUG_RETURN(false);
+
+  // Full index must be used
+  bitmap_clear_all(&tmp_set);
+  mark_columns_used_by_index_no_reset(index, &tmp_set);
+  if (!bitmap_cmp(&tmp_set, read_set))
+    DBUG_RETURN(false);
+
+  // Start read removal in handler
+  DBUG_RETURN(file->start_read_removal());
+}
+
+
+/**
   Test if the order list consists of simple field expressions
 
   @param order                Linked list of ORDER BY arguments

=== modified file 'sql/table.h'
--- a/sql/table.h	2012-05-09 16:47:39 +0000
+++ b/sql/table.h	2012-05-11 12:05:39 +0000
@@ -1171,6 +1171,8 @@ public:
   Field *get_timestamp_field();
 
   bool update_const_key_parts(Item *conds);
+
+  bool check_read_removal(uint index);
 };
 
 

No bundle (reason: useless for push emails).
Thread
bzr push into mysql-trunk branch (magnus.blaudd:3815 to 3816) magnus.blaudd7 Jun