From: Martin Skold
Date: June 12 2006 12:23pm
Subject: bk commit into 5.1 tree (mskold:1.2183) BUG#18184
Below is the list of changes that have just been committed into a local
5.1 repository of marty. When marty does a push these changes will
be propagated to the main repository and, within 24 hours after the
push, to the public repository.
For information on how to access the public repository
see http://dev.mysql.com/doc/mysql/en/installing-source-tree.html

ChangeSet
  1.2183 06/06/12 14:23:21 mskold@stripped +8 -0
  sql_delete.cc:
    Bug #18864  TRUNCATE TABLE doesn't reset AUTO_INCREMENT value on ndb table: locked LOCK_open mutex to support TRUNCATE with re-create and cluster binlog
  Many files:
    Bug #18864  TRUNCATE TABLE doesn't reset AUTO_INCREMENT value on ndb table: adaptation to MySQL Cluster replication
  ndb_lock.result, ha_ndbcluster.cc:
    Fix for Bug #18184  SELECT ... FOR UPDATE does not work..: Adaptation to 5.1 code
  NdbDictionaryImpl.hpp:
    Fix of bad merge

  sql/sql_delete.cc
    1.184 06/06/12 14:21:16 mskold@stripped +2 -0
    Bug #18864  TRUNCATE TABLE doesn't reset AUTO_INCREMENT value on ndb table: locked LOCK_open mutex to support TRUNCATE with re-create and cluster binlog

  sql/ha_ndbcluster_binlog.h
    1.14 06/06/12 14:15:56 mskold@stripped +2 -1
    Bug #18864  TRUNCATE TABLE doesn't reset AUTO_INCREMENT value on ndb table: adaptation to MySQL Cluster replication

  mysql-test/t/rpl_ndb_basic.test
    1.6 06/06/12 14:15:56 mskold@stripped +8 -1
    Bug #18864  TRUNCATE TABLE doesn't reset AUTO_INCREMENT value on ndb table: adaptation to MySQL Cluster replication

  mysql-test/r/rpl_ndb_basic.result
    1.4 06/06/12 14:15:56 mskold@stripped +15 -0
    Bug #18864  TRUNCATE TABLE doesn't reset AUTO_INCREMENT value on ndb table: adaptation to MySQL Cluster replication

  sql/ha_ndbcluster_binlog.cc
    1.64 06/06/12 14:15:55 mskold@stripped +4 -0
    Bug #18864  TRUNCATE TABLE doesn't reset AUTO_INCREMENT value on ndb table: adaptation to MySQL Cluster replication

  mysql-test/r/ndb_lock.result
    1.9 06/06/12 14:13:02 mskold@stripped +80 -0
    Fix for Bug #18184  SELECT ... FOR UPDATE does not work..: Adaptation to 5.1 code

  sql/ha_ndbcluster.cc
    1.319 06/06/12 14:13:01 mskold@stripped +80 -8
    Fix for Bug #18184  SELECT ... FOR UPDATE does not work..: Adaptation to 5.1 code

  storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp
    1.64 06/06/12 14:09:36 mskold@stripped +1 -1
    Fix of bad merge
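
For readers skimming the diffs below: the heart of the Bug #18184 fix is the lock-mode mapping in ha_ndbcluster.cc, where SELECT ... FOR UPDATE (TL_WRITE_ALLOW_WRITE) becomes an exclusive NDB row lock and SELECT ... LOCK IN SHARE MODE (TL_READ_WITH_SHARED_LOCKS) becomes a shared one, while plain reads stay on committed read. What follows is a minimal, self-contained sketch of that mapping only; the enums are local stand-ins for thr_lock_type and NdbOperation::LockMode, not the real server or NDB API headers.

/* Illustrative sketch, not part of the changeset. */
#include <cstdio>

enum ThrLockType { TL_READ, TL_READ_WITH_SHARED_LOCKS, TL_READ_HIGH_PRIORITY,
                   TL_WRITE_ALLOW_WRITE };                 // stand-in ordering
enum NdbLockMode { LM_CommittedRead, LM_Read, LM_Exclusive };

/* Mirrors the decision in ha_ndbcluster::get_ndb_lock_type() below:
   FOR UPDATE -> exclusive, LOCK IN SHARE MODE (or blob reads) -> shared,
   everything else -> committed read. */
static NdbLockMode get_ndb_lock_type(ThrLockType type, bool uses_blobs)
{
  if (type >= TL_WRITE_ALLOW_WRITE)
    return LM_Exclusive;
  if (type == TL_READ_WITH_SHARED_LOCKS || uses_blobs)
    return LM_Read;
  return LM_CommittedRead;
}

int main()
{
  std::printf("FOR UPDATE         -> %d\n", (int) get_ndb_lock_type(TL_WRITE_ALLOW_WRITE, false));
  std::printf("LOCK IN SHARE MODE -> %d\n", (int) get_ndb_lock_type(TL_READ_WITH_SHARED_LOCKS, false));
  std::printf("plain SELECT       -> %d\n", (int) get_ndb_lock_type(TL_READ, false));
  return 0;
}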

# This is a BitKeeper patch.  What follows are the unified diffs for the
# set of deltas contained in the patch.  The rest of the patch, the part
# that BitKeeper cares about, is below these diffs.
# User:	mskold
# Host:	linux.site
# Root:	/home/marty/MySQL/mysql-5.1

--- 1.183/sql/sql_delete.cc	2006-06-04 20:05:17 +02:00
+++ 1.184/sql/sql_delete.cc	2006-06-12 14:21:16 +02:00
@@ -954,8 +954,10 @@ bool mysql_truncate(THD *thd, TABLE_LIST
   // crashes, replacement works.  *(path + path_length - reg_ext_length)=
   // '\0';
   path[path_length - reg_ext_length] = 0;
+  VOID(pthread_mutex_lock(&LOCK_open));
   error= ha_create_table(thd, path, table_list->db, table_list->table_name,
                          &create_info, 1);
+  VOID(pthread_mutex_unlock(&LOCK_open));
   query_cache_invalidate3(thd, table_list, 0);
 
 end:
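
The hunk above serializes the TRUNCATE re-create path by taking LOCK_open around ha_create_table(), so the cluster binlog code never observes the table definition mid-recreate. Below is a minimal sketch of that lock-around-recreate pattern under the same pthread API; recreate_table() is a hypothetical placeholder for the real handler call, not server code.

/* Illustrative sketch, not part of the changeset. */
#include <pthread.h>
#include <cstdio>

static pthread_mutex_t table_mutex= PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical stand-in for the drop + re-create that TRUNCATE performs. */
static int recreate_table(const char *name)
{
  std::printf("re-creating %s\n", name);
  return 0;                           // 0 == success, as in the handler convention
}

int main()
{
  /* Hold the mutex across the re-create so no concurrent thread (e.g. the
     binlog schema-distribution thread) sees the table half re-created. */
  pthread_mutex_lock(&table_mutex);
  int error= recreate_table("t1");
  pthread_mutex_unlock(&table_mutex);
  return error;
}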

--- 1.63/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp	2006-06-12 11:33:54 +02:00
+++ 1.64/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp	2006-06-12 14:09:36 +02:00
@@ -581,7 +581,7 @@ public:
   int createIndex(NdbIndexImpl &ix, NdbTableImpl & tab);
   int dropIndex(const char * indexName, 
 		const char * tableName);
-  int dropIndex(NdbIndexImpl &);
+  int dropIndex(NdbIndexImpl &, const char * tableName);
   NdbTableImpl * getIndexTable(NdbIndexImpl * index, 
 			       NdbTableImpl * table);
 

--- 1.318/sql/ha_ndbcluster.cc	2006-06-12 11:33:54 +02:00
+++ 1.319/sql/ha_ndbcluster.cc	2006-06-12 14:13:01 +02:00
@@ -1402,7 +1402,8 @@ int ha_ndbcluster::get_ndb_lock_type(enu
 {
   if (type >= TL_WRITE_ALLOW_WRITE)
     return NdbOperation::LM_Exclusive;
-  if (uses_blob_value())
+  if (type ==  TL_READ_WITH_SHARED_LOCKS ||
+      uses_blob_value())
     return NdbOperation::LM_Read;
   return NdbOperation::LM_CommittedRead;
 }
@@ -1947,7 +1948,30 @@ inline int ha_ndbcluster::fetch_next(Ndb
   int check;
   NdbTransaction *trans= m_active_trans;
   
-  bool contact_ndb= m_lock.type < TL_WRITE_ALLOW_WRITE;
+  if (m_lock_tuple)
+  {
+    /*
+      Lock level m_lock.type is either TL_WRITE_ALLOW_WRITE
+      (SELECT ... FOR UPDATE) or TL_READ_WITH_SHARED_LOCKS (SELECT
+      ... LOCK IN SHARE MODE) and the row was not explicitly unlocked
+      with an unlock_row() call
+    */
+      NdbConnection *trans= m_active_trans;
+      NdbOperation *op;
+      // Lock row
+      DBUG_PRINT("info", ("Keeping lock on scanned row"));
+      
+      if (!(op= m_active_cursor->lockCurrentTuple()))
+      {
+	m_lock_tuple= false;
+	ERR_RETURN(trans->getNdbError());
+      }
+      m_ops_pending++;
+  }
+  m_lock_tuple= false;
+  
+  bool contact_ndb= m_lock.type < TL_WRITE_ALLOW_WRITE &&
+                    m_lock.type != TL_READ_WITH_SHARED_LOCKS;
   do {
     DBUG_PRINT("info", ("Call nextResult, contact_ndb: %d", contact_ndb));
     /*
@@ -1963,6 +1987,13 @@ inline int ha_ndbcluster::fetch_next(Ndb
     
     if ((check= cursor->nextResult(contact_ndb, m_force_send)) == 0)
     {
+      /*
+	Explicitly lock tuple if "select for update" or
+	"select lock in share mode"
+      */
+      m_lock_tuple= (m_lock.type == TL_WRITE_ALLOW_WRITE
+		     || 
+		     m_lock.type == TL_READ_WITH_SHARED_LOCKS);
       DBUG_RETURN(0);
     } 
     else if (check == 1 || check == 2)
@@ -2258,9 +2289,10 @@ int ha_ndbcluster::ordered_index_scan(co
     restart= FALSE;
     NdbOperation::LockMode lm=
       (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
+    bool need_pk = (lm == NdbOperation::LM_Read);
     if (!(op= trans->getNdbIndexScanOperation(m_index[active_index].index, 
                                               m_table)) ||
-        op->readTuples(lm, 0, parallelism, sorted, descending))
+        op->readTuples(lm, 0, parallelism, sorted, descending, false, need_pk))
       ERR_RETURN(trans->getNdbError());
     if (m_use_partition_function && part_spec != NULL &&
         part_spec->start_part == part_spec->end_part)
@@ -2329,8 +2361,11 @@ int ha_ndbcluster::full_table_scan(byte 
 
   NdbOperation::LockMode lm=
     (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
+  bool need_pk = (lm == NdbOperation::LM_Read);
   if (!(op=trans->getNdbScanOperation(m_table)) ||
-      op->readTuples(lm, 0, parallelism))
+      op->readTuples(lm, 
+		     (need_pk)?NdbScanOperation::SF_KeyInfo:0, 
+		     parallelism))
     ERR_RETURN(trans->getNdbError());
   m_active_cursor= op;
 
@@ -2706,6 +2741,7 @@ int ha_ndbcluster::update_row(const byte
     DBUG_PRINT("info", ("Calling updateTuple on cursor"));
     if (!(op= cursor->updateCurrentTuple()))
       ERR_RETURN(trans->getNdbError());
+    m_lock_tuple= false;
     m_ops_pending++;
     if (uses_blob_value())
       m_blobs_pending= TRUE;
@@ -2814,6 +2850,7 @@ int ha_ndbcluster::delete_row(const byte
     DBUG_PRINT("info", ("Calling deleteTuple on cursor"));
     if (cursor->deleteCurrentTuple() != 0)
       ERR_RETURN(trans->getNdbError());     
+    m_lock_tuple= false;
     m_ops_pending++;
 
     if (m_use_partition_function)
@@ -3073,6 +3110,12 @@ int ha_ndbcluster::index_init(uint index
   DBUG_PRINT("enter", ("index: %u  sorted: %d", index, sorted));
   active_index= index;
   m_sorted= sorted;
+  /*
+    Locks are explicitly released in scan
+    unless m_lock.type == TL_READ_HIGH_PRIORITY
+    and there is no subsequent call to unlock_row()
+  */
+  m_lock_tuple= false;
   DBUG_RETURN(0);
 }
 
@@ -3997,6 +4041,22 @@ int ha_ndbcluster::external_lock(THD *th
 }
 
 /*
+  Unlock the last row read in an open scan.
+  Rows are unlocked by default in ndb, but
+  for SELECT FOR UPDATE and SELECT LOCK IN SHARE MODE
+  locks are kept if unlock_row() is not called.
+*/
+
+void ha_ndbcluster::unlock_row() 
+{
+  DBUG_ENTER("unlock_row");
+
+  DBUG_PRINT("info", ("Unlocking row"));
+  m_lock_tuple= false;
+  DBUG_VOID_RETURN;
+}
+
+/*
   Start a transaction for running a statement if one is not
   already running in a transaction. This will be the case in
   a BEGIN; COMMIT; block
@@ -4413,6 +4473,7 @@ int ha_ndbcluster::create(const char *na
   uint pack_length, length, i, pk_length= 0;
   const void *data, *pack_data;
   bool create_from_engine= (info->table_options & HA_OPTION_CREATE_FROM_ENGINE);
+  bool is_truncate= (current_thd->lex->sql_command == SQLCOM_TRUNCATE);
 
   DBUG_ENTER("ha_ndbcluster::create");
   DBUG_PRINT("enter", ("name: %s", name));
@@ -4421,6 +4482,12 @@ int ha_ndbcluster::create(const char *na
   set_dbname(name);
   set_tabname(name);
 
+  if (is_truncate)
+  {
+    DBUG_PRINT("info", ("Dropping and re-creating table for TRUNCATE"));
+    if ((my_errno= delete_table(name)))
+      DBUG_RETURN(my_errno);
+  }
   table= form;
   if (create_from_engine)
   {
@@ -4665,7 +4732,9 @@ int ha_ndbcluster::create(const char *na
                                share->db, share->table_name,
                                m_table->getObjectId(),
                                m_table->getObjectVersion(),
-                               SOT_CREATE_TABLE, 0, 0, 1);
+                               (is_truncate) ?
+			       SOT_TRUNCATE_TABLE : SOT_CREATE_TABLE, 
+			       0, 0, 1);
       break;
     }
   }
@@ -5200,7 +5269,8 @@ ha_ndbcluster::delete_table(ha_ndbcluste
   */
   int table_dropped= dict->getNdbError().code != 709;
 
-  if (!IS_TMP_PREFIX(table_name) && share)
+  if (!IS_TMP_PREFIX(table_name) && share &&
+      current_thd->lex->sql_command != SQLCOM_TRUNCATE)
   {
     ndbcluster_log_schema_op(thd, share,
                              thd->query, thd->query_length,
@@ -6183,7 +6253,7 @@ static int ndbcluster_init()
 #ifdef HAVE_NDB_BINLOG
     ndbcluster_binlog_init_handlerton();
 #endif
-    h.flags=            HTON_TEMPORARY_NOT_SUPPORTED;
+    h.flags=            HTON_CAN_RECREATE | HTON_TEMPORARY_NOT_SUPPORTED;
   }
 
   if (have_ndbcluster != SHOW_OPTION_YES)
@@ -7430,6 +7500,7 @@ ha_ndbcluster::read_multi_range_first(KE
   byte *end_of_buffer= (byte*)buffer->buffer_end;
   NdbOperation::LockMode lm= 
     (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
+  bool need_pk = (lm == NdbOperation::LM_Read);
   const NDBTAB *tab= m_table;
   const NDBINDEX *unique_idx= m_index[active_index].unique_index;
   const NDBINDEX *idx= m_index[active_index].index; 
@@ -7520,7 +7591,8 @@ ha_ndbcluster::read_multi_range_first(KE
           end_of_buffer -= reclength;
         }
         else if ((scanOp= m_active_trans->getNdbIndexScanOperation(idx, tab)) 
-                 &&!scanOp->readTuples(lm, 0, parallelism, sorted, FALSE, TRUE)
+                 &&!scanOp->readTuples(lm, 0, parallelism, sorted, 
+				       FALSE, TRUE, need_pk)
                  &&!generate_scan_filter(m_cond_stack, scanOp)
                  &&!define_read_attrs(end_of_buffer-reclength, scanOp))
         {

--- 1.8/mysql-test/r/ndb_lock.result	2006-06-12 11:33:54 +02:00
+++ 1.9/mysql-test/r/ndb_lock.result	2006-06-12 14:13:02 +02:00
@@ -63,6 +63,86 @@ pk	u	o
 5	5	5
 insert into t1 values (1,1,1);
 drop table t1;
+create table t1 (x integer not null primary key, y varchar(32), z integer, key(z)) engine = ndb;
+insert into t1 values (1,'one',1), (2,'two',2),(3,"three",3);
+begin;
+select * from t1 where x = 1 for update;
+x	y	z
+1	one	1
+begin;
+select * from t1 where x = 2 for update;
+x	y	z
+2	two	2
+select * from t1 where x = 1 for update;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+rollback;
+commit;
+begin;
+select * from t1 where y = 'one' or y = 'three' for update;
+x	y	z
+3	three	3
+1	one	1
+begin;
+select * from t1 where x = 1 for update;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+rollback;
+commit;
+begin;
+select * from t1 where z > 1 and z < 3 for update;
+x	y	z
+2	two	2
+begin;
+select * from t1 where x = 1 for update;
+x	y	z
+1	one	1
+select * from t1 where x = 2 for update;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+rollback;
+commit;
+begin;
+select * from t1 where x = 1 lock in share mode;
+x	y	z
+1	one	1
+begin;
+select * from t1 where x = 1 lock in share mode;
+x	y	z
+1	one	1
+select * from t1 where x = 2 for update;
+x	y	z
+2	two	2
+select * from t1 where x = 1 for update;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+rollback;
+commit;
+begin;
+select * from t1 where y = 'one' or y = 'three' lock in share mode;
+x	y	z
+3	three	3
+1	one	1
+begin;
+select * from t1 where y = 'one' lock in share mode;
+x	y	z
+1	one	1
+select * from t1 where x = 1 for update;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+rollback;
+commit;
+begin;
+select * from t1 where z > 1 and z < 3 lock in share mode;
+x	y	z
+2	two	2
+begin;
+select * from t1 where z = 1 lock in share mode;
+x	y	z
+1	one	1
+select * from t1 where x = 1 for update;
+x	y	z
+1	one	1
+select * from t1 where x = 2 for update;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+rollback;
+commit;
+drop table t1;
 create table t3 (id2 int) engine=ndb;
 lock tables t3 write;
 unlock tables;

--- 1.3/mysql-test/r/rpl_ndb_basic.result	2006-04-19 14:54:31 +02:00
+++ 1.4/mysql-test/r/rpl_ndb_basic.result	2006-06-12 14:15:56 +02:00
@@ -146,4 +146,19 @@ c1	c2
 3	NULL
 4	NULL
 5	NULL
+TRUNCATE t1;
+SELECT count(*) FROM t1;
+count(*)
+0
+INSERT INTO t1 VALUES (101,NULL),(102,NULL),(103,NULL),(104,NULL),(105,NULL),(106,NULL),(107,NULL),(108,NULL),(109,NULL),(1010,NULL);
+SELECT count(*) FROM t1;
+count(*)
+10
+SELECT c1 FROM t1 ORDER BY c1 LIMIT 5;
+c1
+101
+102
+103
+104
+105
 DROP TABLE t1;

--- 1.5/mysql-test/t/rpl_ndb_basic.test	2006-04-19 14:54:32 +02:00
+++ 1.6/mysql-test/t/rpl_ndb_basic.test	2006-06-12 14:15:56 +02:00
@@ -172,7 +172,14 @@ connection slave;
 # here we would get error 1412 prior to bug
 SELECT * FROM t1 ORDER BY c1 LIMIT 5;
 
-
+--connection master
+TRUNCATE t1;
+SELECT count(*) FROM t1;
+INSERT INTO t1 VALUES (101,NULL),(102,NULL),(103,NULL),(104,NULL),(105,NULL),(106,NULL),(107,NULL),(108,NULL),(109,NULL),(1010,NULL);
+--sync_slave_with_master
+connection slave;
+SELECT count(*) FROM t1;
+SELECT c1 FROM t1 ORDER BY c1 LIMIT 5;
 
 # cleanup
 --connection master

--- 1.63/sql/ha_ndbcluster_binlog.cc	2006-06-04 20:05:17 +02:00
+++ 1.64/sql/ha_ndbcluster_binlog.cc	2006-06-12 14:15:55 +02:00
@@ -1232,6 +1232,9 @@ int ndbcluster_log_schema_op(THD *thd, N
   case SOT_LOGFILE_GROUP:
     type_str= "logfile group";
     break;
+  case SOT_TRUNCATE_TABLE:
+    type_str= "truncate table";
+    break;
   default:
     abort(); /* should not happen, programming error */
   }
@@ -1765,6 +1768,7 @@ ndb_binlog_thread_handle_schema_event(TH
           post_epoch_unlock= 1;
           break;
         case SOT_CREATE_TABLE:
+	case SOT_TRUNCATE_TABLE:
           pthread_mutex_lock(&LOCK_open);
           if (ndb_create_table_from_engine(thd, schema->db, schema->name))
           {
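
The schema-distribution change above adds a truncate op type alongside create, so a TRUNCATE on one server is replayed on the others by re-opening the table definition from the cluster dictionary. A tiny sketch of how the new value could be reported; the enum value other than SOT_TRUNCATE_TABLE= 11 is a placeholder, and schema_op_name() is a hypothetical condensation of the switch in ndbcluster_log_schema_op(), not the real function.

/* Illustrative sketch, not part of the changeset. */
#include <cstdio>

/* Only SOT_TRUNCATE_TABLE= 11 is taken from the ha_ndbcluster_binlog.h diff. */
enum SchemaOpType { SOT_CREATE_TABLE= 0, SOT_TRUNCATE_TABLE= 11 };

static const char *schema_op_name(SchemaOpType type)
{
  switch (type)
  {
  case SOT_CREATE_TABLE:   return "create table";
  case SOT_TRUNCATE_TABLE: return "truncate table";
  }
  return "unknown";
}

int main()
{
  /* Both op types share one handling path on the receiving server: rediscover
     the table from the engine, which is why the new SOT_TRUNCATE_TABLE case
     in the hunk above joins the SOT_CREATE_TABLE branch. */
  std::printf("%s\n", schema_op_name(SOT_TRUNCATE_TABLE));
  return 0;
}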

--- 1.13/sql/ha_ndbcluster_binlog.h	2006-05-31 01:52:08 +02:00
+++ 1.14/sql/ha_ndbcluster_binlog.h	2006-06-12 14:15:56 +02:00
@@ -49,7 +49,8 @@ enum SCHEMA_OP_TYPE
   SOT_CLEAR_SLOCK= 7,
   SOT_TABLESPACE= 8,
   SOT_LOGFILE_GROUP= 9,
-  SOT_RENAME_TABLE= 10
+  SOT_RENAME_TABLE= 10,
+  SOT_TRUNCATE_TABLE= 11
 };
 
 const uint max_ndb_nodes= 64; /* multiple of 32 */