List: Commits
From: tomas
Date: January 31 2006 8:34pm
Subject: bk commit into 5.1 tree (tomas:1.2098)
Below is the list of changes that have just been committed into a local
5.1 repository of tomas. When tomas does a push, these changes will
be propagated to the main repository and, within 24 hours after the
push, to the public repository.
For information on how to access the public repository, see
http://dev.mysql.com/doc/mysql/en/installing-source-tree.html

ChangeSet
  1.2098 06/01/31 21:34:13 tomas@stripped +2 -0
  Merge tulin@stripped:/home/bk/mysql-5.1-new
  into  poseidon.ndb.mysql.com:/home/tomas/mysql-5.1-new

  sql/ha_ndbcluster_binlog.cc
    1.10 06/01/31 21:34:00 tomas@stripped +0 -0
    Auto merged

  sql/ha_ndbcluster.cc
    1.251 06/01/31 21:33:59 tomas@stripped +0 -0
    Auto merged

# This is a BitKeeper patch.  What follows are the unified diffs for the
# set of deltas contained in the patch.  The rest of the patch, the part
# that BitKeeper cares about, is below these diffs.
# User:	tomas
# Host:	poseidon.ndb.mysql.com
# Root:	/home/tomas/mysql-5.1-new/RESYNC

--- 1.250/sql/ha_ndbcluster.cc	2006-01-31 19:57:50 +01:00
+++ 1.251/sql/ha_ndbcluster.cc	2006-01-31 21:33:59 +01:00
@@ -466,19 +466,35 @@
     #   The mapped error code
 */
 
-void
+int
 ha_ndbcluster::invalidate_dictionary_cache(TABLE_SHARE *share, Ndb *ndb,
-					   const char *tabname, bool global)
+                                           const char *dbname, const char *tabname,
+                                           bool global)
 {
   NDBDICT *dict= ndb->getDictionary();
   DBUG_ENTER("invalidate_dictionary_cache");
   DBUG_PRINT("info", ("invalidating %s", tabname));
 
+#ifdef HAVE_NDB_BINLOG
+  char key[FN_REFLEN];
+  strxnmov(key, FN_LEN-1, mysql_data_home, "/",
+           dbname, "/", tabname, NullS);
+  DBUG_PRINT("info", ("Getting ndbcluster mutex"));
+  pthread_mutex_lock(&ndbcluster_mutex);
+  NDB_SHARE *ndb_share= (NDB_SHARE*)hash_search(&ndbcluster_open_tables,
+                                                (byte*) key, strlen(key));
+  pthread_mutex_unlock(&ndbcluster_mutex);
+  DBUG_PRINT("info", ("Released ndbcluster mutex"));
+  // Only binlog_thread is allowed to globally invalidate a table
+  if (global && ndb_share && ndb_share->op && (current_thd != injector_thd))
+    DBUG_RETURN(1);
+#endif
+
   if (global)
   {
     const NDBTAB *tab= dict->getTable(tabname);
     if (!tab)
-      DBUG_VOID_RETURN;
+      DBUG_RETURN(1);
     if (tab->getObjectStatus() == NdbDictionary::Object::Invalid)
     {
       // Global cache has already been invalidated
@@ -491,13 +507,14 @@
   else
     dict->removeCachedTable(tabname);
   share->version=0L;			/* Free when thread is ready */
-  DBUG_VOID_RETURN;
+  DBUG_RETURN(0);
 }
 
 void ha_ndbcluster::invalidate_dictionary_cache(bool global)
 {
   NDBDICT *dict= get_ndb()->getDictionary();
-  invalidate_dictionary_cache(table_share, get_ndb(), m_tabname, global);
+  if (invalidate_dictionary_cache(table_share, get_ndb(), m_dbname, m_tabname, global))
+    return;
   /* Invalidate indexes */
   for (uint i= 0; i < table_share->keys; i++)
   {
@@ -1256,10 +1273,9 @@
   Renumber indexes in index list by shifting out
   indexes that are to be dropped
  */
-int ha_ndbcluster::renumber_indexes(Ndb *ndb, TABLE *tab)
+void ha_ndbcluster::renumber_indexes(Ndb *ndb, TABLE *tab)
 {
   uint i;
-  int error= 0;
   const char *index_name;
   KEY* key_info= tab->key_info;
   const char **key_name= tab->s->keynames.type_names;
@@ -1288,7 +1304,7 @@
     }
   }
 
-  DBUG_RETURN(error);
+  DBUG_VOID_RETURN;
 }
 
 /*
@@ -4418,7 +4434,7 @@
   NDBDICT *dict= ndb->getDictionary();
   if (!(tab= dict->getTable(m_tabname)))
     DBUG_RETURN(0); // Must be a create, ignore since frm is saved in create
-
+  DBUG_ASSERT(m_share->state == NSS_ALTERED);
   name= table->s->normalized_path.str;
   DBUG_PRINT("enter", ("m_tabname: %s, path: %s", m_tabname, name));
   if (readfrm(name, &data, &length) ||
@@ -4427,17 +4443,18 @@
     DBUG_PRINT("info", ("Missing frm for %s", m_tabname));
     my_free((char*)data, MYF(MY_ALLOW_ZERO_PTR));
     my_free((char*)pack_data, MYF(MY_ALLOW_ZERO_PTR));
-    DBUG_RETURN(1);
+    error= 1;
   }
-  if (cmp_frm(tab, pack_data, pack_length))
-  {  
+  else
+  {
     DBUG_PRINT("info", ("Table %s has changed, altering frm in ndb", 
                         m_tabname));
     error= table_changed(pack_data, pack_length);
-    m_share->state= NSS_INITIAL;
+    my_free((char*)data, MYF(MY_ALLOW_ZERO_PTR));
+    my_free((char*)pack_data, MYF(MY_ALLOW_ZERO_PTR));
   }
-  my_free((char*)data, MYF(MY_ALLOW_ZERO_PTR));
-  my_free((char*)pack_data, MYF(MY_ALLOW_ZERO_PTR));
+  m_share->state= NSS_INITIAL;
+  free_share(&m_share); // Decrease ref_count
 
   DBUG_RETURN(error);
 }
@@ -4554,6 +4571,7 @@
   int error= 0;
   uint idx;
 
+  DBUG_ASSERT(m_share->state == NSS_INITIAL);
   for (idx= 0; idx < num_of_keys; idx++)
   {
     KEY *key= key_info + idx;
@@ -4569,7 +4587,11 @@
     if((error= create_index(key_info[idx].name, key, idx_type, idx)))
       break;
   }
-  m_share->state= NSS_ALTERED;
+  if (!error)
+  {
+    ndbcluster_get_share(m_share); // Increase ref_count
+    m_share->state= NSS_ALTERED;
+  }
   DBUG_RETURN(error);  
 }
 
@@ -4593,6 +4615,7 @@
                                       uint *key_num, uint num_of_keys)
 {
   DBUG_ENTER("ha_ndbcluster::prepare_drop_index");
+  DBUG_ASSERT(m_share->state == NSS_INITIAL);
   // Mark indexes for deletion
   uint idx;
   for (idx= 0; idx < num_of_keys; idx++)
@@ -4604,8 +4627,10 @@
   THD *thd= current_thd;
   Thd_ndb *thd_ndb= get_thd_ndb(thd);
   Ndb *ndb= thd_ndb->ndb;
+  renumber_indexes(ndb, table_arg);
+  ndbcluster_get_share(m_share); // Increase ref_count
   m_share->state= NSS_ALTERED;
-  DBUG_RETURN(renumber_indexes(ndb, table_arg));
+  DBUG_RETURN(0);
 }
  
 /*
@@ -4613,13 +4638,19 @@
 */
 int ha_ndbcluster::final_drop_index(TABLE *table_arg)
 {
+  int error;
   DBUG_ENTER("ha_ndbcluster::final_drop_index");
   DBUG_PRINT("info", ("ha_ndbcluster::final_drop_index"));
   // Really drop indexes
   THD *thd= current_thd;
   Thd_ndb *thd_ndb= get_thd_ndb(thd);
   Ndb *ndb= thd_ndb->ndb;
-  DBUG_RETURN(drop_indexes(ndb, table_arg));
+  if((error= drop_indexes(ndb, table_arg)))
+  {
+    m_share->state= NSS_INITIAL;
+    free_share(&m_share); // Decrease ref_count
+  }
+  DBUG_RETURN(error);
 }
 
 /*
@@ -5237,41 +5268,66 @@
   const void* data;
   const NDBTAB* tab;
   Ndb* ndb;
+  char key[FN_REFLEN];
   DBUG_ENTER("ndbcluster_discover");
   DBUG_PRINT("enter", ("db: %s, name: %s", db, name)); 
 
   if (!(ndb= check_ndb_in_thd(thd)))
     DBUG_RETURN(HA_ERR_NO_CONNECTION);  
   ndb->setDatabaseName(db);
-
   NDBDICT* dict= ndb->getDictionary();
   dict->set_local_table_data_size(sizeof(Ndb_local_table_statistics));
   dict->invalidateTable(name);
-  if (!(tab= dict->getTable(name)))
-  {    
-    const NdbError err= dict->getNdbError();
-    if (err.code == 709 || err.code == 723)
-      DBUG_RETURN(-1);
-    ERR_RETURN(err);
-  }
-  DBUG_PRINT("info", ("Found table %s", tab->getName()));
-  
-  len= tab->getFrmLength();  
-  if (len == 0 || tab->getFrmData() == NULL)
+  strxnmov(key, FN_LEN-1, mysql_data_home, "/", db, "/", name, NullS);
+  NDB_SHARE *share= get_share(key, 0, false);
+  if (share && share->state == NSS_ALTERED)
   {
-    DBUG_PRINT("error", ("No frm data found."));
-    DBUG_RETURN(1);
+    // Frm has been altered on disk, but not yet written to ndb
+    if (readfrm(key, &data, &len))
+    {
+      DBUG_PRINT("error", ("Could not read frm"));
+      if (share)
+        free_share(&share);
+      DBUG_RETURN(1);
+    }
   }
-  
-  if (unpackfrm(&data, &len, tab->getFrmData()))
+  else
   {
-    DBUG_PRINT("error", ("Could not unpack table"));
-    DBUG_RETURN(1);
+    if (!(tab= dict->getTable(name)))
+    {    
+      const NdbError err= dict->getNdbError();
+      if (share)
+        free_share(&share);
+      if (err.code == 709 || err.code == 723)
+        DBUG_RETURN(-1);
+      ERR_RETURN(err);
+    }
+    DBUG_PRINT("info", ("Found table %s", tab->getName()));
+    
+    len= tab->getFrmLength();  
+    if (len == 0 || tab->getFrmData() == NULL)
+    {
+      DBUG_PRINT("error", ("No frm data found."));
+      if (share)
+        free_share(&share);
+      DBUG_RETURN(1);
+    }
+    
+    if (unpackfrm(&data, &len, tab->getFrmData()))
+    {
+      DBUG_PRINT("error", ("Could not unpack table"));
+      if (share)
+        free_share(&share);
+      DBUG_RETURN(1);
+    }
   }
 
   *frmlen= len;
   *frmblob= data;
   
+  if (share)
+    free_share(&share);
+
   DBUG_RETURN(0);
 }
 
@@ -9218,21 +9274,35 @@
 bool ha_ndbcluster::check_if_incompatible_data(HA_CREATE_INFO *info,
 					       uint table_changes)
 {
-  return COMPATIBLE_DATA_NO; // Disable fast add/drop index  
+  DBUG_ENTER("ha_ndbcluster::check_if_incompatible_data");
+  uint i;
+  const NDBTAB *tab= (const NDBTAB *) m_table;
+
+  for (i= 0; i < table->s->fields; i++) 
+  {
+    Field *field= table->field[i];
+    const NDBCOL *col= tab->getColumn(field->field_name);
+    if (field->add_index &&
+        col->getStorageType() == NdbDictionary::Column::StorageTypeDisk)
+    {
+      DBUG_PRINT("info", ("add/drop index not supported for disk stored column"));
+      DBUG_RETURN(COMPATIBLE_DATA_NO);
+    }
+  }
   if (table_changes != IS_EQUAL_YES)
-    return COMPATIBLE_DATA_NO;
+    DBUG_RETURN(COMPATIBLE_DATA_NO);
   
   /* Check that auto_increment value was not changed */
   if ((info->used_fields & HA_CREATE_USED_AUTO) &&
       info->auto_increment_value != 0)
-    return COMPATIBLE_DATA_NO;
+    DBUG_RETURN(COMPATIBLE_DATA_NO);
   
   /* Check that row format didn't change */
   if ((info->used_fields & HA_CREATE_USED_AUTO) &&
       get_row_type() != info->row_type)
-    return COMPATIBLE_DATA_NO;
+    DBUG_RETURN(COMPATIBLE_DATA_NO);
 
-  return COMPATIBLE_DATA_YES;
+  DBUG_RETURN(COMPATIBLE_DATA_YES);
 }
 
 bool set_up_tablespace(st_alter_tablespace *info,
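
The common thread in the ha_ndbcluster.cc hunks above is that the NDB share now
carries an explicit NSS_INITIAL/NSS_ALTERED state paired with a reference count:
add_index() and prepare_drop_index() take an extra reference and mark the share
NSS_ALTERED, create_handler_files() (and a failing final_drop_index()) put it
back to NSS_INITIAL and release that reference, and ndbcluster_discover() reads
the frm from disk instead of from NDB while the share is marked altered. What
follows is only an illustrative sketch of that handshake, using simplified
stand-in names (Share, acquire_share, release_share, begin/end_online_alter)
rather than the real NDB_SHARE / ndbcluster_get_share / free_share machinery:

  #include <cassert>
  #include <cstdio>

  // Simplified stand-ins for NDB_SHARE and its two states (names assumed).
  enum ShareState { NSS_INITIAL, NSS_ALTERED };

  struct Share {
    int ref_count = 1;           // reference held by the open handler
    ShareState state = NSS_INITIAL;
  };

  // Stand-ins for ndbcluster_get_share()/free_share(): plain ref counting.
  static void acquire_share(Share *s) { ++s->ref_count; }
  static void release_share(Share *s) { --s->ref_count; }

  // add_index()/prepare_drop_index(): pin the share for the duration of the
  // online alter and flag it as altered.
  static void begin_online_alter(Share *s) {
    assert(s->state == NSS_INITIAL);
    acquire_share(s);
    s->state = NSS_ALTERED;
  }

  // create_handler_files() (or a failed final_drop_index()): the frm has been
  // written back (or the alter abandoned), so drop the extra reference.
  static void end_online_alter(Share *s) {
    s->state = NSS_INITIAL;
    release_share(s);
  }

  // ndbcluster_discover(): while the share is marked altered, the frm on disk
  // is newer than the one stored in NDB, so read it locally.
  static const char *discover_source(const Share *s) {
    return (s && s->state == NSS_ALTERED) ? "frm on disk" : "frm stored in NDB";
  }

  int main() {
    Share share;
    begin_online_alter(&share);
    std::printf("during alter: read %s (refs=%d)\n",
                discover_source(&share), share.ref_count);
    end_online_alter(&share);
    std::printf("after alter:  read %s (refs=%d)\n",
                discover_source(&share), share.ref_count);
    return 0;
  }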

--- 1.9/sql/ha_ndbcluster_binlog.cc	2006-01-31 17:21:28 +01:00
+++ 1.10/sql/ha_ndbcluster_binlog.cc	2006-01-31 21:34:00 +01:00
@@ -1277,6 +1277,7 @@
     ndb->setDatabaseName(share->table->s->db.str);
     ha_ndbcluster::invalidate_dictionary_cache(share->table->s,
                                                ndb,
+                                               share->table->s->db.str,
                                                share->table->s->table_name.str,
                                                TRUE);
     remote_drop_table= 1;
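
The ha_ndbcluster_binlog.cc hunk exists because invalidate_dictionary_cache()
now needs the database name: under HAVE_NDB_BINLOG it builds the
<datadir>/<db>/<table> key, looks up the open share, and refuses a global
invalidation when the share has a binlog operation attached and the caller is
not the injector thread (hence the new int return value). Below is a rough
sketch of that guard only, with assumed stand-in types and helpers (Share,
Thread, lookup_share, a std::map registry) in place of the real NDB_SHARE,
THD, ndbcluster_mutex, and hash_search() plumbing:

  #include <cstdio>
  #include <map>
  #include <string>

  // Assumed stand-ins: a share that may carry a binlog "op", a registry of
  // open shares keyed by <datadir>/<db>/<table>, and a thread identity.
  struct Share  { const void *op = nullptr; };
  struct Thread { int id = 0; };

  static std::map<std::string, Share> open_shares;  // hash_search() stand-in
  static Thread *injector_thd = nullptr;            // set by the binlog thread

  static Share *lookup_share(const std::string &key) {
    auto it = open_shares.find(key);
    return it == open_shares.end() ? nullptr : &it->second;
  }

  // Mirrors the new int return value: 0 on success, 1 when a global
  // invalidation has to be refused.
  static int invalidate_cache(const std::string &datadir, const std::string &db,
                              const std::string &table, bool global,
                              Thread *current) {
    std::string key = datadir + "/" + db + "/" + table;
    Share *share = lookup_share(key);
    // Only the binlog injector thread may globally invalidate a table whose
    // share has a binlog operation attached.
    if (global && share && share->op && current != injector_thd)
      return 1;
    // ... invalidate the NDB dictionary cache for `table` here ...
    return 0;
  }

  int main() {
    Thread injector{1}, user{2};
    injector_thd = &injector;
    int dummy_op;
    open_shares["/data/test/t1"].op = &dummy_op;  // table with a binlog op
    std::printf("user thread:     %d\n",
                invalidate_cache("/data", "test", "t1", true, &user));      // 1
    std::printf("injector thread: %d\n",
                invalidate_cache("/data", "test", "t1", true, &injector));  // 0
    return 0;
  }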