List: Commits
From: magnus.blaudd
Date: June 10 2011 10:38am
Subject: bzr push into mysql-trunk-cluster branch (magnus.blaudd:3326 to 3327)
 3327 magnus.blaudd@stripped	2011-06-10 [merge]
      Merge 5.5-cluster -> trunk-cluster

    modified:
      client/mysqltest.cc
      mysql-test/suite/ndb/r/ndb_update_no_read.result
      mysql-test/suite/ndb/t/disabled.def
      sql/ha_ndbcluster.cc
      sql/ha_ndbcluster.h
      sql/ha_ndbcluster_glue.h
      sql/handler.h
      sql/sql_delete.cc
      sql/sql_update.cc
 3326 magnus.blaudd@stripped	2011-06-10
      ndb
       - revert changes to join_outer.result/test which were inadvertently
         merged to trunk-cluster

    modified:
      mysql-test/r/join_outer.result
      mysql-test/t/join_outer.test
=== modified file 'client/mysqltest.cc'
--- a/client/mysqltest.cc	2011-06-09 09:09:10 +0000
+++ b/client/mysqltest.cc	2011-06-10 10:27:38 +0000
@@ -4219,24 +4219,17 @@ int do_save_master_pos()
     if (have_ndbcluster)
     {
       ulonglong start_epoch= 0, handled_epoch= 0,
-	latest_epoch=0, latest_trans_epoch=0,
-	latest_handled_binlog_epoch= 0, latest_received_binlog_epoch= 0,
-	latest_applied_binlog_epoch= 0;
+	latest_trans_epoch=0,
+	latest_handled_binlog_epoch= 0;
       int count= 0;
       int do_continue= 1;
       while (do_continue)
       {
         const char binlog[]= "binlog";
-	const char latest_epoch_str[]=
-          "latest_epoch=";
         const char latest_trans_epoch_str[]=
           "latest_trans_epoch=";
-	const char latest_received_binlog_epoch_str[]=
-	  "latest_received_binlog_epoch";
         const char latest_handled_binlog_epoch_str[]=
           "latest_handled_binlog_epoch=";
-        const char latest_applied_binlog_epoch_str[]=
-          "latest_applied_binlog_epoch=";
         if (count)
           my_sleep(100*1000); /* 100ms */
         if (mysql_query(mysql, query= "show engine ndb status"))
@@ -4250,18 +4243,6 @@ int do_save_master_pos()
           {
             const char *status= row[2];
 
-	    /* latest_epoch */
-	    while (*status && strncmp(status, latest_epoch_str,
-				      sizeof(latest_epoch_str)-1))
-	      status++;
-	    if (*status)
-            {
-	      status+= sizeof(latest_epoch_str)-1;
-	      latest_epoch= strtoull(status, (char**) 0, 10);
-	    }
-	    else
-	      die("result does not contain '%s' in '%s'",
-		  latest_epoch_str, query);
 	    /* latest_trans_epoch */
 	    while (*status && strncmp(status, latest_trans_epoch_str,
 				      sizeof(latest_trans_epoch_str)-1))
@@ -4274,19 +4255,7 @@ int do_save_master_pos()
 	    else
 	      die("result does not contain '%s' in '%s'",
 		  latest_trans_epoch_str, query);
-	    /* latest_received_binlog_epoch */
-	    while (*status &&
-		   strncmp(status, latest_received_binlog_epoch_str,
-			   sizeof(latest_received_binlog_epoch_str)-1))
-	      status++;
-	    if (*status)
-	    {
-	      status+= sizeof(latest_received_binlog_epoch_str)-1;
-	      latest_received_binlog_epoch= strtoull(status, (char**) 0, 10);
-	    }
-	    else
-	      die("result does not contain '%s' in '%s'",
-		  latest_received_binlog_epoch_str, query);
+
 	    /* latest_handled_binlog */
 	    while (*status &&
 		   strncmp(status, latest_handled_binlog_epoch_str,
@@ -4300,19 +4269,7 @@ int do_save_master_pos()
 	    else
 	      die("result does not contain '%s' in '%s'",
 		  latest_handled_binlog_epoch_str, query);
-	    /* latest_applied_binlog_epoch */
-	    while (*status &&
-		   strncmp(status, latest_applied_binlog_epoch_str,
-			   sizeof(latest_applied_binlog_epoch_str)-1))
-	      status++;
-	    if (*status)
-	    {
-	      status+= sizeof(latest_applied_binlog_epoch_str)-1;
-	      latest_applied_binlog_epoch= strtoull(status, (char**) 0, 10);
-	    }
-	    else
-	      die("result does not contain '%s' in '%s'",
-		  latest_applied_binlog_epoch_str, query);
+
 	    if (count == 0)
 	      start_epoch= latest_trans_epoch;
 	    break;
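
After this cleanup do_save_master_pos() still scans the "show engine ndb
status" output for just two markers, latest_trans_epoch= and
latest_handled_binlog_epoch=. A standalone sketch of that strncmp/strtoull
idiom (not the patched mysqltest.cc code; parse_epoch and the sample status
string are invented for illustration):

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  static unsigned long long parse_epoch(const char *status, const char *key)
  {
    /* Advance until the remaining text starts with the wanted "key=" marker */
    while (*status && strncmp(status, key, strlen(key)))
      status++;
    if (!*status)
    {
      fprintf(stderr, "result does not contain '%s'\n", key);
      exit(1);
    }
    /* Skip the marker and convert the digits that follow */
    return strtoull(status + strlen(key), (char**) 0, 10);
  }

  int main()
  {
    /* Made-up sample of the status column from "show engine ndb status" */
    const char *status=
      "latest_epoch=17, latest_trans_epoch=15, latest_handled_binlog_epoch=15";
    printf("latest_trans_epoch=%llu\n",
           parse_epoch(status, "latest_trans_epoch="));
    printf("latest_handled_binlog_epoch=%llu\n",
           parse_epoch(status, "latest_handled_binlog_epoch="));
    return 0;
  }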

=== modified file 'mysql-test/suite/ndb/r/ndb_update_no_read.result'
--- a/mysql-test/suite/ndb/r/ndb_update_no_read.result	2010-12-01 12:04:27 +0000
+++ b/mysql-test/suite/ndb/r/ndb_update_no_read.result	2011-06-09 12:14:22 +0000
@@ -294,9 +294,10 @@ affected rows: 1
 # 1 warning
 
 update t1 set b='one plus one' where a=2;
+affected rows: 1
+info: Rows matched: 1  Changed: 1  Warnings: 1
 Warnings:
 Warning	1265	Data truncated for column 'b' at row 1
-affected rows: 1
 @ndb_execute_count:=VARIABLE_VALUE-@ndb_init_execute_count
 1
 affected rows: 1
@@ -367,9 +368,10 @@ affected rows: 1
 begin;
 affected rows: 0
 update t1 set b='one plus one' where a=2;
+affected rows: 1
+info: Rows matched: 1  Changed: 1  Warnings: 1
 Warnings:
 Warning	1265	Data truncated for column 'b' at row 1
-affected rows: 1
 commit;
 affected rows: 0
 @ndb_execute_count:=VARIABLE_VALUE-@ndb_init_execute_count

=== modified file 'mysql-test/suite/ndb/t/disabled.def'
--- a/mysql-test/suite/ndb/t/disabled.def	2011-05-09 08:49:19 +0000
+++ b/mysql-test/suite/ndb/t/disabled.def	2011-06-09 12:14:22 +0000
@@ -16,9 +16,6 @@ ndb_partition_error2 : Bug#40989 ndb_par
 ndb_cache_trans           : Bug#42197 Query cache and autocommit
 ndb_disconnect_ddl        : Bug#31853 flaky testcase...
 
-ndb_bulk_delete		 : SEAGULL rbwr
-ndb_update_no_read	 : SEAGULL rbwr
-
 ndb_condition_pushdown	 : SEAGULL
 
 ndb_dd_disk2memory	 : SEAGULL alter

=== modified file 'sql/ha_ndbcluster.cc'
--- a/sql/ha_ndbcluster.cc	2011-06-08 12:21:27 +0000
+++ b/sql/ha_ndbcluster.cc	2011-06-10 10:27:38 +0000
@@ -4628,26 +4628,79 @@ int ha_ndbcluster::bulk_update_row(const
 
 int ha_ndbcluster::exec_bulk_update(uint *dup_key_found)
 {
+  NdbTransaction* trans= m_thd_ndb->trans;
   DBUG_ENTER("ha_ndbcluster::exec_bulk_update");
   *dup_key_found= 0;
-  if (m_thd_ndb->m_unsent_bytes &&
-      !thd_allow_batch(table->in_use) &&
-      (!m_thd_ndb->m_handler ||
-       m_blobs_pending))
+
+  // m_handler must be NULL or point to _this_ handler instance
+  assert(m_thd_ndb->m_handler == NULL || m_thd_ndb->m_handler == this);
+
+  if (m_thd_ndb->m_handler &&
+      m_read_before_write_removal_possible)
   {
+    /*
+      This is an autocommit involving only one table and rbwr is on
+
+      Commit the autocommit transaction early(before the usual place
+      in ndbcluster_commit) in order to:
+      1) save one round trip, "no-commit+commit" converted to "commit"
+      2) return the correct number of updated and affected rows
+         to the update loop(which will ask handler in rbwr mode)
+    */
+    DBUG_PRINT("info", ("committing auto-commit+rbwr early"));
     uint ignore_count= 0;
-    if (execute_no_commit(m_thd_ndb, m_thd_ndb->trans,
-                          m_ignore_no_key || m_read_before_write_removal_used,
-                          &ignore_count) != 0)
+    const int ignore_error= 1;
+    if (execute_commit(m_thd_ndb, trans,
+                       m_thd_ndb->m_force_send, ignore_error,
+                       &ignore_count) != 0)
     {
       no_uncommitted_rows_execute_failure();
-      DBUG_RETURN(ndb_err(m_thd_ndb->trans));
+      DBUG_RETURN(ndb_err(trans));
     }
+    DBUG_PRINT("info", ("ignore_count: %u", ignore_count));
     assert(m_rows_changed >= ignore_count);
     assert(m_rows_updated >= ignore_count);
     m_rows_changed-= ignore_count;
     m_rows_updated-= ignore_count;
+    DBUG_RETURN(0);
+  }
+
+  if (m_thd_ndb->m_unsent_bytes == 0)
+  {
+    DBUG_PRINT("exit", ("skip execute - no unsent bytes"));
+    DBUG_RETURN(0);
   }
+
+  if (thd_allow_batch(table->in_use))
+  {
+    /*
+      Turned on by @@transaction_allow_batching=ON
+      or implicitly by slave exec thread
+    */
+    DBUG_PRINT("exit", ("skip execute - transaction_allow_batching is ON"));
+    DBUG_RETURN(0);
+  }
+
+  if (m_thd_ndb->m_handler &&
+      !m_blobs_pending)
+  {
+    // Execute at commit time(in 'ndbcluster_commit') to save a round trip
+    DBUG_PRINT("exit", ("skip execute - simple autocommit"));
+    DBUG_RETURN(0);
+  }
+
+  uint ignore_count= 0;
+  if (execute_no_commit(m_thd_ndb, trans,
+                        m_ignore_no_key || m_read_before_write_removal_used,
+                        &ignore_count) != 0)
+  {
+    no_uncommitted_rows_execute_failure();
+    DBUG_RETURN(ndb_err(trans));
+  }
+  assert(m_rows_changed >= ignore_count);
+  assert(m_rows_updated >= ignore_count);
+  m_rows_changed-= ignore_count;
+  m_rows_updated-= ignore_count;
   DBUG_RETURN(0);
 }
 
@@ -4983,25 +5036,76 @@ bool ha_ndbcluster::start_bulk_delete()
 
 int ha_ndbcluster::end_bulk_delete()
 {
+  NdbTransaction* trans= m_thd_ndb->trans;
   DBUG_ENTER("end_bulk_delete");
   assert(m_is_bulk_delete); // Don't allow end() without start()
   m_is_bulk_delete = false;
 
-  if (m_thd_ndb->m_unsent_bytes &&
-      !thd_allow_batch(table->in_use) &&
-      !m_thd_ndb->m_handler)
+  // m_handler must be NULL or point to _this_ handler instance
+  assert(m_thd_ndb->m_handler == NULL || m_thd_ndb->m_handler == this);
+
+  if (m_thd_ndb->m_handler &&
+      m_read_before_write_removal_possible)
   {
+    /*
+      This is an autocommit involving only one table and rbwr is on
+
+      Commit the autocommit transaction early(before the usual place
+      in ndbcluster_commit) in order to:
+      1) save one round trip, "no-commit+commit" converted to "commit"
+      2) return the correct number of updated and affected rows
+         to the delete loop(which will ask handler in rbwr mode)
+    */
+    DBUG_PRINT("info", ("committing auto-commit+rbwr early"));
     uint ignore_count= 0;
-    if (execute_no_commit(m_thd_ndb, m_thd_ndb->trans,
-                          m_ignore_no_key || m_read_before_write_removal_used,
-                          &ignore_count) != 0)
+    const int ignore_error= 1;
+    if (execute_commit(m_thd_ndb, trans,
+                       m_thd_ndb->m_force_send, ignore_error,
+                       &ignore_count) != 0)
     {
       no_uncommitted_rows_execute_failure();
-      DBUG_RETURN(ndb_err(m_thd_ndb->trans));
+      DBUG_RETURN(ndb_err(trans));
     }
+    DBUG_PRINT("info", ("ignore_count: %u", ignore_count));
     assert(m_rows_deleted >= ignore_count);
     m_rows_deleted-= ignore_count;
+    DBUG_RETURN(0);
+  }
+
+  if (m_thd_ndb->m_unsent_bytes == 0)
+  {
+    DBUG_PRINT("exit", ("skip execute - no unsent bytes"));
+    DBUG_RETURN(0);
+  }
+
+  if (thd_allow_batch(table->in_use))
+  {
+    /*
+      Turned on by @@transaction_allow_batching=ON
+      or implicitly by slave exec thread
+    */
+    DBUG_PRINT("exit", ("skip execute - transaction_allow_batching is ON"));
+    DBUG_RETURN(0);
   }
+
+  if (m_thd_ndb->m_handler)
+  {
+    // Execute at commit time(in 'ndbcluster_commit') to save a round trip
+    DBUG_PRINT("exit", ("skip execute - simple autocommit"));
+    DBUG_RETURN(0);
+  }
+
+  uint ignore_count= 0;
+  if (execute_no_commit(m_thd_ndb, trans,
+                        m_ignore_no_key || m_read_before_write_removal_used,
+                        &ignore_count) != 0)
+  {
+    no_uncommitted_rows_execute_failure();
+    DBUG_RETURN(ndb_err(trans));
+  }
+
+  assert(m_rows_deleted >= ignore_count);
+  m_rows_deleted-= ignore_count;
   DBUG_RETURN(0);
 }
 
@@ -6183,39 +6287,49 @@ bool ha_ndbcluster::read_before_write_re
 {
   THD *thd= table->in_use;
   DBUG_ENTER("read_before_write_removal_possible");
-  /*
-    We need to verify a large number of things before accepting to remove
-    the read before the update. We cannot avoid read before when primary
-    key is updated, when a unique key is updated, when a BLOB is updated,
-    for deletes on tables with BLOB's it is also not possible to avoid
-    the read before the update and finally it is necessary that the
-    update expressions only contain constant expressions.
-  */
-  if (uses_blob_value(table->write_set) ||
-      (thd->lex->sql_command == SQLCOM_DELETE &&
-       table_share->blob_fields) ||
-      (table_share->primary_key != MAX_KEY &&
-       bitmap_is_overlapping(table->write_set, m_pk_bitmap_p)))
+
+  if (uses_blob_value(table->write_set))
   {
-    DBUG_RETURN(FALSE);
+    DBUG_PRINT("exit", ("No! Blob field in write_set"));
+    DBUG_RETURN(false);
+  }
+
+  if (thd->lex->sql_command == SQLCOM_DELETE &&
+      table_share->blob_fields)
+  {
+    DBUG_PRINT("exit", ("No! DELETE from table with blob(s)"));
+    DBUG_RETURN(false);
   }
+
+  if (table_share->primary_key == MAX_KEY)
+  {
+    DBUG_PRINT("exit", ("No! Table with hidden key"));
+    DBUG_RETURN(false);
+  }
+
+  if (bitmap_is_overlapping(table->write_set, m_pk_bitmap_p))
+  {
+    DBUG_PRINT("exit", ("No! Updating primary key"));
+    DBUG_RETURN(false);
+  }
+
   if (m_has_unique_index)
   {
-    KEY *key;
     for (uint i= 0; i < table_share->keys; i++)
     {
-      key= table->key_info + i;
+      const KEY* key= table->key_info + i;
       if ((key->flags & HA_NOSAME) &&
           bitmap_is_overlapping(table->write_set,
                                 m_key_fields[i]))
       {
-        DBUG_RETURN(FALSE);
+        DBUG_PRINT("exit", ("No! Unique key %d is updated", i));
+        DBUG_RETURN(false);
       }
     }
   }
-  DBUG_PRINT("info", ("read_before_write_removal_possible TRUE"));
   m_read_before_write_removal_possible= TRUE;
-  DBUG_RETURN(TRUE);
+  DBUG_PRINT("exit", ("Yes, rbwr is possible!"));
+  DBUG_RETURN(true);
 }
 
 
@@ -7079,58 +7193,18 @@ int ndbcluster_commit(handlerton *hton, 
     if (thd_ndb->m_handler &&
         thd_ndb->m_handler->m_read_before_write_removal_possible)
     {
-#ifndef NDB_WITHOUT_READ_BEFORE_WRITE_REMOVAL
-      /* Autocommit with read-before-write removal
-       * Some operations in this autocommitted statement have not
-       * yet been executed
-       * They will be executed here as part of commit, and the results
-       * (rowcount, message) sent back to the client will then be modified 
-       * according to how the execution went.
-       * This saves a single roundtrip in the autocommit case
-       */
-      uint ignore_count= 0;
-      res= execute_commit(thd_ndb, trans, THDVAR(thd, force_send),
-                          TRUE, &ignore_count);
-      if (!res && ignore_count)
-      {
-        DBUG_PRINT("info", ("AutoCommit + RBW removal, ignore_count=%u",
-                            ignore_count));
-        /* We have some rows to ignore, modify recorded results,
-         * regenerate result message as required.
-         */
-        thd->row_count_func-= ignore_count;
-
-        ha_rows affected= 0;
-        char buff[ STRING_BUFFER_USUAL_SIZE ];
-        const char* msg= NULL;
-        if (thd->lex->sql_command == SQLCOM_DELETE)
-        {
-          assert(thd_ndb->m_handler->m_rows_deleted >= ignore_count);
-          affected= (thd_ndb->m_handler->m_rows_deleted-= ignore_count);
-        }
-        else
-        {
-          DBUG_PRINT("info", ("Update : message was %s", 
-                              thd->main_da.message()));
-          assert(thd_ndb->m_handler->m_rows_updated >= ignore_count);
-          affected= (thd_ndb->m_handler->m_rows_updated-= ignore_count);
-          /* For update in this scenario, we set found and changed to be 
-           * the same as affected
-           * Regenerate the update message
-           */
-          sprintf(buff, ER(ER_UPDATE_INFO), (ulong)affected, (ulong)affected,
-                  (ulong) thd->cuted_fields);
-          msg= buff;
-          DBUG_PRINT("info", ("Update : message changed to %s",
-                              msg));
-        }
-
-        /* Modify execution result + optionally message */
-        thd->main_da.modify_affected_rows(affected, msg);
+      /*
+        This is an autocommit involving only one table and
+        rbwr is on, thus the transaction has already been
+        committed in exec_bulk_update() or end_bulk_delete()
+      */
+      DBUG_PRINT("info", ("autocommit+rbwr, transaction already committed"));
+      if (trans->commitStatus() != NdbTransaction::Committed)
+      {
+        sql_print_error("found uncommitted autocommit+rbwr transaction, "
+                        "commit status: %d", trans->commitStatus());
+        abort();
       }
-#else
-      abort(); // Should never come here without rbwr support
-#endif
     }
     else
       res= execute_commit(thd_ndb, trans, THDVAR(thd, force_send), FALSE);
@@ -9563,19 +9637,6 @@ ha_ndbcluster::~ha_ndbcluster() 
   DBUG_VOID_RETURN;
 }
 
-#ifndef NDB_WITHOUT_READ_BEFORE_WRITE_REMOVAL
-void
-ha_ndbcluster::column_bitmaps_signal(uint sig_type)
-{
-  DBUG_ENTER("column_bitmaps_signal");
-  DBUG_PRINT("enter", ("read_set: 0x%lx  write_set: 0x%lx",
-             (long) table->read_set->bitmap[0],
-             (long) table->write_set->bitmap[0]));
-  if (sig_type & HA_COMPLETE_TABLE_READ_BITMAP)
-    bitmap_copy(&m_save_read_set, table->read_set);
-  DBUG_VOID_RETURN;
-}
-#endif
 
 /**
   Open a table for further use
@@ -9599,11 +9660,6 @@ int ha_ndbcluster::open(const char *name
   DBUG_PRINT("enter", ("name: %s  mode: %d  test_if_locked: %d",
                        name, mode, test_if_locked));
 
-  if (bitmap_init(&m_save_read_set, NULL, table_share->fields, FALSE))
-  {
-    DBUG_RETURN(1);
-  }
-
   if (table_share->primary_key != MAX_KEY)
   {
     /*
@@ -9889,7 +9945,6 @@ void ha_ndbcluster::local_close(THD *thd
     my_free((char*)m_key_fields, MYF(0));
     m_key_fields= NULL;
   }
-  bitmap_free(&m_save_read_set);
   if (m_share)
   {
     /* ndb_share reference handler free */
@@ -12329,40 +12384,20 @@ ha_ndbcluster::null_value_index_search(K
 
 void ha_ndbcluster::check_read_before_write_removal()
 {
-  bool use_removal= TRUE;
   DBUG_ENTER("check_read_before_write_removal");
-  DBUG_ASSERT(m_read_before_write_removal_possible);
-  /*
-    We are doing an update or delete and it is possible that we
-    can ignore the read before the update or delete. This is
-    possible here since we are not updating the primary key and
-    if the index used is unique or primary and if the WHERE clause
-    only involves fields from this index we are ok to go. At this
-    moment we can only updates where all SET expressions are
-    constants. Thus no read set will come from SET expressions.
-  */
-  if (table_share->primary_key == active_index)
-  {
-    if (!bitmap_cmp(&m_save_read_set, m_pk_bitmap_p))
-      use_removal= FALSE;
-  }
-  else
-  {
-    KEY *key= table->key_info + active_index;
-    if (!(key->flags & HA_NOSAME))
-    {
-      /* Optimisation not applicable on non-unique indexes */
-      use_removal= FALSE;
-    }
-    else if (!bitmap_cmp(&m_save_read_set,
-                         m_key_fields[active_index]))
-    {
-      use_removal= FALSE;
-    }
-  }
-  m_read_before_write_removal_used= use_removal;
-  DBUG_PRINT("info", ("m_read_before_write_removal_used: %d",
-                      m_read_before_write_removal_used));
+
+  /* Must have determined that rbwr is possible */
+  assert(m_read_before_write_removal_possible);
+  m_read_before_write_removal_used= true;
+
+  /* Can't use on table with hidden primary key */
+  assert(table_share->primary_key != MAX_KEY);
+
+  /* Index must be unique */
+  DBUG_PRINT("info", ("using index %d", active_index));
+  const KEY *key= table->key_info + active_index;
+  assert((key->flags & HA_NOSAME));
+
   DBUG_VOID_RETURN;
 }
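
The rewritten exec_bulk_update() and end_bulk_delete() earlier in this file
replace one combined condition with a chain of early returns. A condensed,
standalone restatement of that decision order (BulkState, decide() and the
field names are invented; they only mirror the member variables the patch
tests). end_bulk_delete() takes the same path but skips the blob check on the
final "simple autocommit" case:

  #include <stdio.h>

  struct BulkState
  {
    bool autocommit_single_table;  /* m_thd_ndb->m_handler == this */
    bool rbwr_possible;            /* m_read_before_write_removal_possible */
    bool allow_batch;              /* thd_allow_batch(table->in_use) */
    bool blobs_pending;            /* m_blobs_pending */
    unsigned long unsent_bytes;    /* m_thd_ndb->m_unsent_bytes */
  };

  enum Action { COMMIT_EARLY, SKIP_EXECUTE, EXECUTE_NO_COMMIT };

  static Action decide(const BulkState &s)
  {
    if (s.autocommit_single_table && s.rbwr_possible)
      return COMMIT_EARLY;         /* save the no-commit round trip */
    if (s.unsent_bytes == 0)
      return SKIP_EXECUTE;         /* nothing buffered to send */
    if (s.allow_batch)
      return SKIP_EXECUTE;         /* @@transaction_allow_batching=ON */
    if (s.autocommit_single_table && !s.blobs_pending)
      return SKIP_EXECUTE;         /* flushed later in ndbcluster_commit */
    return EXECUTE_NO_COMMIT;      /* multi-table or blobs pending: flush now */
  }

  int main()
  {
    const BulkState s= { true, true, false, false, 0 };
    printf("action=%d\n", (int)decide(s));  /* 0 == COMMIT_EARLY */
    return 0;
  }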
 

=== modified file 'sql/ha_ndbcluster.h'
--- a/sql/ha_ndbcluster.h	2011-06-08 12:21:27 +0000
+++ b/sql/ha_ndbcluster.h	2011-06-10 10:27:38 +0000
@@ -188,9 +188,6 @@ class ha_ndbcluster: public handler
   ha_ndbcluster(handlerton *hton, TABLE_SHARE *table);
   ~ha_ndbcluster();
 
-#ifndef NDB_WITHOUT_READ_BEFORE_WRITE_REMOVAL
-  void column_bitmaps_signal(uint sig_type);
-#endif
   int open(const char *name, int mode, uint test_if_locked);
   int close(void);
   void local_close(THD *thd, bool release_metadata);
@@ -248,6 +245,7 @@ class ha_ndbcluster: public handler
 #endif
   void get_dynamic_partition_info(PARTITION_STATS *stat_info, uint part_id);
   uint32 calculate_key_hash_value(Field **field_array);
+  bool read_before_write_removal_supported() const { return true; }
   bool read_before_write_removal_possible();
   ha_rows read_before_write_removal_rows_written(void) const;
   int extra(enum ha_extra_function operation);
@@ -637,7 +635,6 @@ private:
   int m_current_range_no;
 
   MY_BITMAP **m_key_fields;
-  MY_BITMAP m_save_read_set;
   // NdbRecAttr has no reference to blob
   NdbValue m_value[NDB_MAX_ATTRIBUTES_IN_TABLE];
   Uint64 m_ref;

=== modified file 'sql/ha_ndbcluster_glue.h'
--- a/sql/ha_ndbcluster_glue.h	2011-03-28 08:30:40 +0000
+++ b/sql/ha_ndbcluster_glue.h	2011-06-10 10:27:38 +0000
@@ -67,9 +67,6 @@ bool close_cached_tables(THD *thd, TABLE
 /* Online alter table not supported */
 #define NDB_WITHOUT_ONLINE_ALTER
 
-/* Read before write removal not supported */
-#define NDB_WITHOUT_READ_BEFORE_WRITE_REMOVAL
-
 /* thd has no version field anymore */
 #define NDB_THD_HAS_NO_VERSION
 

=== modified file 'sql/handler.h'
--- a/sql/handler.h	2011-05-20 11:50:50 +0000
+++ b/sql/handler.h	2011-06-10 10:27:38 +0000
@@ -1783,6 +1783,30 @@ public:
   virtual int extra_opt(enum ha_extra_function operation, ulong cache_size)
   { return extra(operation); }
 
+#ifndef MCP_WL5906
+  /*
+    Informs the handler if this handler support read removal
+    (could use table_flags, but patch is smaller this way)
+   */
+  virtual bool read_before_write_removal_supported(void) const
+  { return false; }
+
+  /*
+    Informs handler that it is possible to optimise away the real read
+    operation from the handler for the current table and instead
+    use a generated read to optimise simple UPDATE and DELETEs.
+  */
+  virtual bool read_before_write_removal_possible(void)
+  { return false; }
+
+  /*
+    Return the number of rows the handler has written while using
+    read before write removal
+   */
+  virtual ha_rows read_before_write_removal_rows_written(void) const
+  { DBUG_ASSERT(0); return (ha_rows) 0; }
+#endif
+
   /**
     In an UPDATE or DELETE, if the row under the cursor was locked by another
     transaction, and the engine used an optimistic read of the last
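
To make the intent of the three MCP_WL5906 virtuals concrete, here is a
minimal standalone sketch of how an engine opts in. handler_stub is a
stripped-down stand-in for the real handler class and ExampleEngine is
invented; only the three signatures and their default bodies are taken from
the hunk above:

  #include <assert.h>
  #include <stdio.h>

  typedef unsigned long long ha_rows;   /* stand-in for the server typedef */

  class handler_stub    /* stripped-down stand-in, not sql/handler.h */
  {
  public:
    virtual ~handler_stub() {}
    /* Default: engine does not support read removal at all */
    virtual bool read_before_write_removal_supported(void) const
    { return false; }
    /* Default: removal is never possible for the current statement */
    virtual bool read_before_write_removal_possible(void)
    { return false; }
    /* Only called when removal was used, so the default must never be hit */
    virtual ha_rows read_before_write_removal_rows_written(void) const
    { assert(0); return (ha_rows) 0; }
  };

  class ExampleEngine : public handler_stub  /* invented engine for illustration */
  {
    ha_rows m_rows_written;
  public:
    ExampleEngine() : m_rows_written(0) {}
    bool read_before_write_removal_supported(void) const { return true; }
    bool read_before_write_removal_possible(void) { return true; }
    ha_rows read_before_write_removal_rows_written(void) const
    { return m_rows_written; }
  };

  int main()
  {
    ExampleEngine engine;
    if (engine.read_before_write_removal_supported() &&
        engine.read_before_write_removal_possible())
      printf("rows written: %llu\n",
             engine.read_before_write_removal_rows_written());
    return 0;
  }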

=== modified file 'sql/sql_delete.cc'
--- a/sql/sql_delete.cc	2011-05-20 09:44:48 +0000
+++ b/sql/sql_delete.cc	2011-06-10 10:27:38 +0000
@@ -59,6 +59,9 @@ bool mysql_delete(THD *thd, TABLE_LIST *
   bool          const_cond_result;
   ha_rows	deleted= 0;
   bool          reverse= FALSE;
+#ifndef MCP_WL5906
+  bool          read_removal= false;
+#endif
   bool          skip_record;
   ORDER *order= (ORDER *) ((order_list && order_list->elements) ?
                            order_list->first : NULL);
@@ -293,6 +296,33 @@ bool mysql_delete(THD *thd, TABLE_LIST *
   else
     will_batch= !table->file->start_bulk_delete();
 
+#ifndef MCP_WL5906
+  /*
+    Read removal is possible if the selected quick read
+    method is using full unique index
+  */
+  if (select && select->quick &&
+      will_batch &&
+      !using_limit &&
+      table->file->read_before_write_removal_supported())
+  {
+    const uint idx = select->quick->index;
+    DBUG_PRINT("rbwr", ("checking index: %d", idx));
+    const KEY *key= table->key_info + idx;
+    if ((key->flags & HA_NOSAME) == HA_NOSAME)
+    {
+      DBUG_PRINT("rbwr", ("index is unique"));
+      bitmap_clear_all(&table->tmp_set);
+      table->mark_columns_used_by_index_no_reset(idx, &table->tmp_set);
+      if (bitmap_cmp(&table->tmp_set, table->read_set))
+      {
+        DBUG_PRINT("rbwr", ("using whole index, rbwr possible"));
+        read_removal=
+          table->file->read_before_write_removal_possible();
+      }
+    }
+  }
+#endif
 
   table->mark_columns_needed_for_delete();
 
@@ -355,6 +385,15 @@ bool mysql_delete(THD *thd, TABLE_LIST *
       table->file->print_error(loc_error,MYF(0));
     error=1;
   }
+#ifndef MCP_WL5906
+  if (read_removal)
+  {
+    /* Only handler knows how many records really was written */
+    DBUG_PRINT("rbwr", ("old deleted: %ld", (long)deleted));
+    deleted= table->file->read_before_write_removal_rows_written();
+    DBUG_PRINT("rbwr", ("really deleted: %ld", (long)deleted));
+  }
+#endif
   THD_STAGE_INFO(thd, stage_end);
   end_read_record(&info);
   if (options & OPTION_QUICK)
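
The check added above amounts to: the quick select must use a unique index
(HA_NOSAME) and the WHERE clause must read exactly the columns covered by
that index, which is what the bitmap_cmp() against table->tmp_set verifies.
A toy illustration with std::bitset standing in for MY_BITMAP and invented
column numbers:

  #include <bitset>
  #include <stdio.h>

  static const size_t MAX_COLS= 16;

  /* Same shape of test as in the hunk: unique index, and the WHERE clause
     reads the whole index and nothing more */
  static bool rbwr_possible(bool index_is_unique,
                            const std::bitset<MAX_COLS> &index_columns,
                            const std::bitset<MAX_COLS> &where_read_set)
  {
    if (!index_is_unique)
      return false;                      /* HA_NOSAME not set on the key */
    return index_columns == where_read_set;
  }

  int main()
  {
    std::bitset<MAX_COLS> pk_cols, read_set;
    pk_cols.set(0);                      /* unique key on column 0 */
    read_set.set(0);                     /* DELETE FROM t WHERE pk_col= <const> */
    printf("rbwr possible: %d\n", (int)rbwr_possible(true, pk_cols, read_set));

    read_set.set(2);                     /* WHERE also reads a non-key column */
    printf("rbwr possible: %d\n", (int)rbwr_possible(true, pk_cols, read_set));
    return 0;
  }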

=== modified file 'sql/sql_update.cc'
--- a/sql/sql_update.cc	2011-05-20 11:50:50 +0000
+++ b/sql/sql_update.cc	2011-06-10 10:27:38 +0000
@@ -158,6 +158,39 @@ static bool check_fields(THD *thd, List<
 }
 
 
+#ifndef MCP_WL5906
+/*
+  Check if all expressions in list are constant expressions
+
+  SYNOPSIS
+    check_constant_expressions()
+    values                       List of expressions
+
+  RETURN
+    TRUE                         Only constant expressions
+    FALSE                        At least one non-constant expression
+*/
+
+static bool check_constant_expressions(List<Item> &values)
+{
+  Item *value;
+  List_iterator_fast<Item> v(values);
+  DBUG_ENTER("check_constant_expressions");
+
+  while ((value= v++))
+  {
+    if (!value->const_item())
+    {
+      DBUG_PRINT("exit", ("expression is not constant"));
+      DBUG_RETURN(FALSE);
+    }
+  }
+  DBUG_PRINT("exit", ("expression is constant"));
+  DBUG_RETURN(TRUE);
+}
+#endif
+
+
 /**
   Re-read record if more columns are needed for error message.
 
@@ -423,6 +456,38 @@ int mysql_update(THD *thd,
     DBUG_RETURN(0);
   }
 
+#ifndef MCP_WL5906
+  /*
+    Read removal is possible if the selected quick read
+    method is using full unique index
+
+    NOTE! table->read_set currently holds the columns which are
+    used for the WHERE clause(this info is most likely already
+    available in select->quick, but where?)
+  */
+  bool read_removal= false;
+  if (select && select->quick &&
+      !ignore &&
+      !using_limit &&
+      table->file->read_before_write_removal_supported())
+  {
+    const uint idx= select->quick->index;
+    DBUG_PRINT("rbwr", ("checking index: %d", idx));
+    const KEY *key= table->key_info + idx;
+    if ((key->flags & HA_NOSAME) == HA_NOSAME)
+    {
+      DBUG_PRINT("rbwr", ("index is unique"));
+      bitmap_clear_all(&table->tmp_set);
+      table->mark_columns_used_by_index_no_reset(idx, &table->tmp_set);
+      if (bitmap_cmp(&table->tmp_set, table->read_set))
+      {
+        DBUG_PRINT("rbwr", ("using full index, rbwr possible"));
+        read_removal= true;
+      }
+    }
+  }
+#endif
+
   /* If running in safe sql mode, don't allow updates without keys */
   if (table->quick_keys.is_clear_all())
   {
@@ -595,6 +660,13 @@ int mysql_update(THD *thd,
     }
     if (table->key_read)
       table->restore_column_maps_after_mark_index();
+
+#ifndef MCP_WL5906
+    /* Rows are already read -> not possible to remove */
+    DBUG_PRINT("rbwr", ("rows are already read, turning off rbwr"));
+    read_removal= false;
+#endif
+
   }
 
   if (ignore)
@@ -634,6 +706,16 @@ int mysql_update(THD *thd,
   else
     will_batch= !table->file->start_bulk_update();
 
+#ifndef MCP_WL5906
+  if (read_removal &&
+      will_batch &&
+      check_constant_expressions(values))
+  {
+    assert(select && select->quick);
+    read_removal= table->file->read_before_write_removal_possible();
+  }
+#endif
+
   /*
     Assure that we can use position()
     if we need to create an error message.
@@ -840,6 +922,22 @@ int mysql_update(THD *thd,
     table->file->end_bulk_update();
   table->file->try_semi_consistent_read(0);
 
+#ifndef MCP_WL5906
+  if (read_removal)
+  {
+    /* Only handler knows how many records really was written */
+    DBUG_PRINT("rbwr", ("adjusting updated: %ld, found: %ld",
+                        (long)updated, (long)found));
+
+    updated= table->file->read_before_write_removal_rows_written();
+    if (!records_are_comparable(table))
+      found= updated;
+
+    DBUG_PRINT("rbwr", ("really updated: %ld, found: %ld",
+                        (long)updated, (long)found));
+  }
+#endif
+
   if (!transactional_table && updated > 0)
     thd->transaction.stmt.mark_modified_non_trans_table();
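
Pulling this hunk together, mysql_update() only asks the handler whether read
removal is possible when all of the following hold; if removal is then used,
the affected-row count reported to the client is taken from
read_before_write_removal_rows_written() after the update loop. The struct
and function below are an invented condensed restatement, not server code:

  #include <stdio.h>

  /* Every field mirrors a condition tested somewhere in this hunk */
  struct UpdatePlan
  {
    bool quick_on_unique_index;   /* select->quick uses a key with HA_NOSAME */
    bool where_reads_only_index;  /* read_set equals the index columns */
    bool uses_limit;              /* LIMIT given */
    bool uses_ignore;             /* UPDATE IGNORE */
    bool rows_already_read;       /* ORDER BY path pre-read the rows */
    bool will_batch;              /* handler accepted start_bulk_update() */
    bool set_exprs_constant;      /* check_constant_expressions(values) */
  };

  static bool may_remove_read(const UpdatePlan &p)
  {
    return p.quick_on_unique_index && p.where_reads_only_index &&
           !p.uses_limit && !p.uses_ignore && !p.rows_already_read &&
           p.will_batch && p.set_exprs_constant;
  }

  int main()
  {
    /* UPDATE t SET b= 'constant' WHERE pk= 2   -- qualifies */
    UpdatePlan p= { true, true, false, false, false, true, true };
    printf("ask handler: %d\n", (int)may_remove_read(p));

    /* UPDATE t SET b= b + 1 WHERE pk= 2        -- SET expression not constant */
    p.set_exprs_constant= false;
    printf("ask handler: %d\n", (int)may_remove_read(p));
    return 0;
  }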
 

No bundle (reason: useless for push emails).