List: Commits
From: jonas oreland
Date: October 20, 2011 4:18pm
Subject: bzr push into mysql-5.1-telco-7.0 branch (jonas.oreland:4613 to 4614)
 4614 jonas oreland	2011-10-20
      ndb stab 2 at new warnings

    modified:
      sql/ha_ndb_index_stat.cc
      sql/ha_ndbcluster.cc
      sql/ha_ndbcluster_binlog.cc
      sql/ha_ndbcluster_binlog.h
      sql/ha_ndbinfo.cc
      storage/ndb/src/common/portlib/ndb_daemon.cc
      storage/ndb/src/common/util/ndb_init.cpp
      storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
      storage/ndb/src/kernel/vm/SimulatedBlock.cpp
 4613 Jonas Oreland	2011-10-20
      ndb - first stab at windows warning

    modified:
      sql/ha_ndbcluster.cc
      storage/ndb/src/common/util/ndb_init.cpp
      storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
      storage/ndb/src/kernel/blocks/ndbfs/Filename.cpp
      storage/ndb/src/kernel/error/ndbd_exit_codes.c
      storage/ndb/src/kernel/vm/SimulatedBlock.cpp
      storage/ndb/src/mgmapi/ndb_logevent.cpp
      storage/ndb/src/mgmsrv/Defragger.hpp
      storage/ndb/src/ndbapi/ndberror.c
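
Both changesets add explicit narrowing casts of the kind typically needed to quiet 64-bit Windows (MSVC) build warnings: strlen(), my_snprintf() and pointer subtraction all yield 64-bit size_t/ptrdiff_t values there, while the receiving variables are 32-bit uint/int, which trips MSVC's "possible loss of data" diagnostics (e.g. C4267). The standalone sketch below is a minimal, hypothetical illustration of that pattern — it is not code from the NDB tree, and the function names are made up for the example.

/*
  Hypothetical illustration of the warning pattern addressed by these
  patches.  On Win64, size_t and ptrdiff_t are 64-bit while unsigned int
  is 32-bit, so assigning a strlen() result or a pointer difference to a
  uint triggers a truncation warning unless the narrowing is explicit.
*/
#include <cstring>
#include <cstdio>

static unsigned int key_length(const char *key)
{
  /* strlen() returns size_t; the cast documents that truncation to
     32 bits is intentional (the names involved are known to be short). */
  return (unsigned int)strlen(key);
}

static unsigned int bytes_left(const char *ptr, const char *end)
{
  /* Pointer subtraction yields ptrdiff_t; the same explicit cast
     silences the narrowing warning. */
  return ptr < end ? (unsigned int)(end - ptr) : 0;
}

int main()
{
  char buf[64] = "size=32,blocks=4";
  printf("len=%u left=%u\n",
         key_length(buf), bytes_left(buf + 4, buf + sizeof(buf)));
  return 0;
}
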
=== modified file 'sql/ha_ndb_index_stat.cc'
--- a/sql/ha_ndb_index_stat.cc	2011-10-17 12:43:31 +0000
+++ b/sql/ha_ndb_index_stat.cc	2011-10-20 16:18:28 +0000
@@ -227,7 +227,7 @@ ndb_index_stat_opt2str(const Ndb_index_s
     const Ndb_index_stat_opt::Val& v= opt.val[i];
     ptr+= strlen(ptr);
     const char* sep= (ptr == buf ? "" : ",");
-    const uint sz= ptr < end ? end - ptr : 0;
+    const uint sz= ptr < end ? (uint)(end - ptr) : 0;
 
     switch (v.unit) {
     case Ndb_index_stat_opt::Ubool:

=== modified file 'sql/ha_ndbcluster.cc'
--- a/sql/ha_ndbcluster.cc	2011-10-20 13:01:37 +0000
+++ b/sql/ha_ndbcluster.cc	2011-10-20 16:18:28 +0000
@@ -2003,7 +2003,7 @@ void ha_ndbcluster::release_blobs_buffer
 */
 
 int cmp_frm(const NDBTAB *ndbtab, const void *pack_data,
-            uint pack_length)
+            size_t pack_length)
 {
   DBUG_ENTER("cmp_frm");
   /*
@@ -3600,7 +3600,7 @@ count_key_columns(const KEY *key_info, c
       break;
     length+= key_part->store_length;
   }
-  return key_part - first_key_part;
+  return (uint)(key_part - first_key_part);
 }
 
 /* Helper method to compute NDB index bounds. Note: does not set range_no. */
@@ -7792,7 +7792,7 @@ static int ndbcluster_update_apply_statu
   // log_name
   char tmp_buf[FN_REFLEN];
   ndb_pack_varchar(ndbtab->getColumn(2u), tmp_buf,
-                   group_master_log_name, strlen(group_master_log_name));
+                   group_master_log_name, (int)strlen(group_master_log_name));
   r|= op->setValue(2u, tmp_buf);
   DBUG_ASSERT(r == 0);
   // start_pos
@@ -9382,7 +9382,7 @@ void ha_ndbcluster::update_create_info(H
            goto err;
 	 const char *tablespace= ts.getName();
          DBUG_PRINT("info", ("Found tablespace '%s'", tablespace));
-         uint tablespace_len= strlen(tablespace);
+         uint tablespace_len= (uint)strlen(tablespace);
          if (tablespace_len != 0) 
          {
            share->tablespace= (char *) alloc_root(&share->mem_root,
@@ -9551,7 +9551,7 @@ int ha_ndbcluster::create(const char *na
     */
     if ((my_errno= write_ndb_file(name)))
       DBUG_RETURN(my_errno);
-    ndbcluster_create_binlog_setup(thd, ndb, name, strlen(name),
+    ndbcluster_create_binlog_setup(thd, ndb, name, (uint)strlen(name),
                                    m_dbname, m_tabname, FALSE);
     DBUG_RETURN(my_errno);
   }
@@ -10463,7 +10463,7 @@ int ha_ndbcluster::rename_table(const ch
       this is a "real" rename table, i.e. not tied to an offline alter table
       - send new name == "to" in query field
     */
-    ndbcluster_log_schema_op(thd, to, strlen(to),
+    ndbcluster_log_schema_op(thd, to, (int)strlen(to),
                              old_dbname, m_tabname,
                              ndb_table_id, ndb_table_version,
                              SOT_RENAME_TABLE_PREPARE,
@@ -11127,7 +11127,7 @@ int ha_ndbcluster::open(const char *name
                             name);
     }
     Ndb* ndb= check_ndb_in_thd(thd);
-    ndbcluster_create_binlog_setup(thd, ndb, name, strlen(name),
+    ndbcluster_create_binlog_setup(thd, ndb, name, (uint)strlen(name),
                                    m_dbname, m_tabname, FALSE);
     if ((m_share=get_share(name, table, FALSE)) == 0)
     {
@@ -11783,7 +11783,7 @@ int ndbcluster_drop_database_impl(THD *t
   List_iterator_fast<char> it(drop_list);
   while ((tabname=it++))
   {
-    tablename_to_filename(tabname, tmp, FN_REFLEN - (tmp - full_path)-1);
+    tablename_to_filename(tabname, tmp, (uint)(FN_REFLEN - (tmp - full_path)-1));
     mysql_mutex_lock(&LOCK_open);
     if (ha_ndbcluster::drop_table(thd, 0, ndb, full_path, dbname, tabname))
     {
@@ -11925,7 +11925,7 @@ int ndbcluster_find_all_files(THD *thd)
       }
       /* finalize construction of path */
       end+= tablename_to_filename(elmt.name, end,
-                                  sizeof(key)-(end-key));
+                                  (uint)(sizeof(key)-(end-key)));
       uchar *data= 0, *pack_data= 0;
       size_t length, pack_length;
       int discover= 0;
@@ -12146,9 +12146,9 @@ ndbcluster_find_files(handlerton *hton,
     {
       file_name_str= (char*)my_hash_element(&ok_tables, i);
       end= end1 +
-        tablename_to_filename(file_name_str, end1, sizeof(name) - (end1 - name));
+        tablename_to_filename(file_name_str, end1, (uint)(sizeof(name) - (end1 - name)));
       mysql_mutex_lock(&LOCK_open);
-      ndbcluster_create_binlog_setup(thd, ndb, name, end-name,
+      ndbcluster_create_binlog_setup(thd, ndb, name, (uint)(end-name),
                                      db, file_name_str, TRUE);
       mysql_mutex_unlock(&LOCK_open);
     }
@@ -12213,7 +12213,7 @@ ndbcluster_find_files(handlerton *hton,
     {
       LEX_STRING *tmp_file_name= 0;
       tmp_file_name= thd->make_lex_string(tmp_file_name, file_name_str,
-                                          strlen(file_name_str), TRUE);
+                                          (uint)strlen(file_name_str), TRUE);
       files->push_back(tmp_file_name); 
     }
   }
@@ -12620,7 +12620,7 @@ void ha_ndbcluster::set_dbname(const cha
   while (ptr >= path_name && *ptr != '\\' && *ptr != '/') {
     ptr--;
   }
-  uint name_len= end - ptr;
+  uint name_len= (uint)(end - ptr);
   memcpy(tmp_name, ptr + 1, name_len);
   tmp_name[name_len]= '\0';
   filename_to_tablename(tmp_name, dbname, sizeof(tmp_buff) - 1);
@@ -12652,7 +12652,7 @@ ha_ndbcluster::set_tabname(const char *p
   while (ptr >= path_name && *ptr != '\\' && *ptr != '/') {
     ptr--;
   }
-  uint name_len= end - ptr;
+  uint name_len= (uint)(end - ptr);
   memcpy(tmp_name, ptr + 1, end - ptr);
   tmp_name[name_len]= '\0';
   filename_to_tablename(tmp_name, tabname, sizeof(tmp_buff) - 1);
@@ -13427,7 +13427,7 @@ int handle_trailing_share(THD *thd, NDB_
       share->key_length= min_key_length;
     }
     share->key_length=
-      my_snprintf(share->key, min_key_length + 1, "#leak%lu",
+      (uint)my_snprintf(share->key, min_key_length + 1, "#leak%lu",
                   trailing_share_id++);
   }
   /* Keep it for possible the future trailing free */
@@ -14630,7 +14630,7 @@ ha_ndbcluster::read_multi_range_next(KEY
           need to process all index scan ranges together.
         */
         if (!multi_range_sorted ||
-            (expected_range_no= multi_range_curr - m_multi_ranges)
+            (expected_range_no= (int)(multi_range_curr - m_multi_ranges))
                 == current_range_no)
         {
           *multi_range_found_p= m_multi_ranges + current_range_no;
@@ -14679,7 +14679,7 @@ ha_ndbcluster::read_multi_range_next(KEY
   */
   DBUG_RETURN(read_multi_range_first(multi_range_found_p, 
                                      multi_range_curr,
-                                     multi_range_end - multi_range_curr, 
+                                     (uint)(multi_range_end - multi_range_curr), 
                                      multi_range_sorted,
                                      multi_range_buffer));
 }
@@ -14738,7 +14738,7 @@ ha_ndbcluster::update_table_comment(
         const char*     comment)/* in:  table comment defined by user */
 {
   THD *thd= current_thd;
-  uint length= strlen(comment);
+  uint length= (uint)strlen(comment);
   if (length > 64000 - 3)
   {
     return((char*)comment); /* string too long */
@@ -14759,7 +14759,7 @@ ha_ndbcluster::update_table_comment(
 
   char *str;
   const char *fmt="%s%snumber_of_replicas: %d";
-  const unsigned fmt_len_plus_extra= length + strlen(fmt);
+  const unsigned fmt_len_plus_extra= length + (uint)strlen(fmt);
   if ((str= (char*) my_malloc(fmt_len_plus_extra, MYF(0))) == NULL)
   {
     sql_print_error("ha_ndbcluster::update_table_comment: "
@@ -15145,7 +15145,7 @@ ndbcluster_show_status(handlerton *hton,
   else
     update_status_variables(NULL, &ns, g_ndb_cluster_connection);
 
-  buflen=
+  buflen= (uint)
     my_snprintf(buf, sizeof(buf),
                 "cluster_node_id=%ld, "
                 "connected_host=%s, "
@@ -15168,7 +15168,7 @@ ndbcluster_show_status(handlerton *hton,
     if (ns.transaction_hint_count[i] > 0 ||
         ns.transaction_no_hint_count[i] > 0)
     {
-      uint namelen= my_snprintf(name, sizeof(name), "node[%d]", i);
+      uint namelen= (uint)my_snprintf(name, sizeof(name), "node[%d]", i);
       buflen= my_snprintf(buf, sizeof(buf),
                           "transaction_hint=%ld, transaction_no_hint=%ld",
                           ns.transaction_hint_count[i],
@@ -15185,12 +15185,12 @@ ndbcluster_show_status(handlerton *hton,
     tmp.m_name= 0;
     while (ndb->get_free_list_usage(&tmp))
     {
-      buflen=
+      buflen= (uint)
         my_snprintf(buf, sizeof(buf),
                   "created=%u, free=%u, sizeof=%u",
                   tmp.m_created, tmp.m_free, tmp.m_sizeof);
       if (stat_print(thd, ndbcluster_hton_name, ndbcluster_hton_name_length,
-                     tmp.m_name, strlen(tmp.m_name), buf, buflen))
+                     tmp.m_name, (uint)strlen(tmp.m_name), buf, buflen))
         DBUG_RETURN(TRUE);
     }
   }
@@ -16686,14 +16686,14 @@ static int ndbcluster_fill_files_table(h
       }
 
       table->field[IS_FILES_FILE_NAME]->set_notnull();
-      table->field[IS_FILES_FILE_NAME]->store(elt.name, strlen(elt.name),
+      table->field[IS_FILES_FILE_NAME]->store(elt.name, (uint)strlen(elt.name),
                                               system_charset_info);
       table->field[IS_FILES_FILE_TYPE]->set_notnull();
       table->field[IS_FILES_FILE_TYPE]->store("DATAFILE",8,
                                               system_charset_info);
       table->field[IS_FILES_TABLESPACE_NAME]->set_notnull();
       table->field[IS_FILES_TABLESPACE_NAME]->store(df.getTablespace(),
-                                                    strlen(df.getTablespace()),
+                                                    (uint)strlen(df.getTablespace()),
                                                     system_charset_info);
       table->field[IS_FILES_LOGFILE_GROUP_NAME]->set_notnull();
       table->field[IS_FILES_LOGFILE_GROUP_NAME]->
@@ -16724,7 +16724,7 @@ static int ndbcluster_fill_files_table(h
       table->field[IS_FILES_ROW_FORMAT]->store("FIXED", 5, system_charset_info);
 
       char extra[30];
-      int len= my_snprintf(extra, sizeof(extra), "CLUSTER_NODE=%u", id);
+      int len= (int)my_snprintf(extra, sizeof(extra), "CLUSTER_NODE=%u", id);
       table->field[IS_FILES_EXTRA]->set_notnull();
       table->field[IS_FILES_EXTRA]->store(extra, len, system_charset_info);
       schema_table_store_record(thd, table);
@@ -16757,12 +16757,12 @@ static int ndbcluster_fill_files_table(h
 
     table->field[IS_FILES_TABLESPACE_NAME]->set_notnull();
     table->field[IS_FILES_TABLESPACE_NAME]->store(elt.name,
-                                                     strlen(elt.name),
+                                                     (uint)strlen(elt.name),
                                                      system_charset_info);
     table->field[IS_FILES_LOGFILE_GROUP_NAME]->set_notnull();
     table->field[IS_FILES_LOGFILE_GROUP_NAME]->
       store(ts.getDefaultLogfileGroup(),
-           strlen(ts.getDefaultLogfileGroup()),
+           (uint)strlen(ts.getDefaultLogfileGroup()),
            system_charset_info);
 
     table->field[IS_FILES_ENGINE]->set_notnull();
@@ -16817,7 +16817,7 @@ static int ndbcluster_fill_files_table(h
 
       init_fill_schema_files_row(table);
       table->field[IS_FILES_FILE_NAME]->set_notnull();
-      table->field[IS_FILES_FILE_NAME]->store(elt.name, strlen(elt.name),
+      table->field[IS_FILES_FILE_NAME]->store(elt.name, (uint)strlen(elt.name),
                                               system_charset_info);
       table->field[IS_FILES_FILE_TYPE]->set_notnull();
       table->field[IS_FILES_FILE_TYPE]->store("UNDO LOG", 8,
@@ -16826,7 +16826,7 @@ static int ndbcluster_fill_files_table(h
       uf.getLogfileGroupId(&objid);
       table->field[IS_FILES_LOGFILE_GROUP_NAME]->set_notnull();
       table->field[IS_FILES_LOGFILE_GROUP_NAME]->store(uf.getLogfileGroup(),
-                                                  strlen(uf.getLogfileGroup()),
+                                                  (uint)strlen(uf.getLogfileGroup()),
                                                        system_charset_info);
       table->field[IS_FILES_LOGFILE_GROUP_NUMBER]->set_notnull();
       table->field[IS_FILES_LOGFILE_GROUP_NUMBER]->store(objid.getObjectId(), true);
@@ -16849,7 +16849,7 @@ static int ndbcluster_fill_files_table(h
       table->field[IS_FILES_VERSION]->store(uf.getObjectVersion(), true);
 
       char extra[100];
-      int len= my_snprintf(extra,sizeof(extra),"CLUSTER_NODE=%u;UNDO_BUFFER_SIZE=%lu",
+      int len= (int)my_snprintf(extra,sizeof(extra),"CLUSTER_NODE=%u;UNDO_BUFFER_SIZE=%lu",
                            id, (ulong) lfg.getUndoBufferSize());
       table->field[IS_FILES_EXTRA]->set_notnull();
       table->field[IS_FILES_EXTRA]->store(extra, len, system_charset_info);
@@ -16884,7 +16884,7 @@ static int ndbcluster_fill_files_table(h
 
     table->field[IS_FILES_LOGFILE_GROUP_NAME]->set_notnull();
     table->field[IS_FILES_LOGFILE_GROUP_NAME]->store(elt.name,
-                                                     strlen(elt.name),
+                                                     (uint)strlen(elt.name),
                                                      system_charset_info);
     table->field[IS_FILES_LOGFILE_GROUP_NUMBER]->set_notnull();
     table->field[IS_FILES_LOGFILE_GROUP_NUMBER]->store(lfg.getObjectId(), true);
@@ -16902,7 +16902,7 @@ static int ndbcluster_fill_files_table(h
     table->field[IS_FILES_VERSION]->store(lfg.getObjectVersion(), true);
 
     char extra[100];
-    int len= my_snprintf(extra,sizeof(extra),
+    int len= (int)my_snprintf(extra,sizeof(extra),
                          "UNDO_BUFFER_SIZE=%lu",
                          (ulong) lfg.getUndoBufferSize());
     table->field[IS_FILES_EXTRA]->set_notnull();

=== modified file 'sql/ha_ndbcluster_binlog.cc'
--- a/sql/ha_ndbcluster_binlog.cc	2011-10-17 12:43:31 +0000
+++ b/sql/ha_ndbcluster_binlog.cc	2011-10-20 16:18:28 +0000
@@ -1549,7 +1549,7 @@ static int ndbcluster_find_all_databases
             /* create missing database */
             sql_print_information("NDB: Discovered missing database '%s'", db);
             const int no_print_error[1]= {0};
-            name_len= my_snprintf(name, sizeof(name), "CREATE DATABASE %s", db);
+            name_len= (unsigned)my_snprintf(name, sizeof(name), "CREATE DATABASE %s", db);
             run_query(thd, name, name + name_len,
                       no_print_error,    /* print error */
                       TRUE,   /* don't binlog the query */
@@ -1891,12 +1891,12 @@ ndbcluster_update_slock(THD *thd,
       DBUG_ASSERT(r == 0);
     
       /* db */
-      ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, db, strlen(db));
+      ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, db, (int)strlen(db));
       r|= op->equal(SCHEMA_DB_I, tmp_buf);
       DBUG_ASSERT(r == 0);
       /* name */
       ndb_pack_varchar(col[SCHEMA_NAME_I], tmp_buf, table_name,
-                       strlen(table_name));
+                       (int)strlen(table_name));
       r|= op->equal(SCHEMA_NAME_I, tmp_buf);
       DBUG_ASSERT(r == 0);
       /* slock */
@@ -1934,12 +1934,12 @@ ndbcluster_update_slock(THD *thd,
       DBUG_ASSERT(r == 0);
 
       /* db */
-      ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, db, strlen(db));
+      ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, db, (int)strlen(db));
       r|= op->equal(SCHEMA_DB_I, tmp_buf);
       DBUG_ASSERT(r == 0);
       /* name */
       ndb_pack_varchar(col[SCHEMA_NAME_I], tmp_buf, table_name,
-                       strlen(table_name));
+                       (int)strlen(table_name));
       r|= op->equal(SCHEMA_NAME_I, tmp_buf);
       DBUG_ASSERT(r == 0);
       /* slock */
@@ -2284,12 +2284,12 @@ int ndbcluster_log_schema_op(THD *thd,
       DBUG_ASSERT(r == 0);
       
       /* db */
-      ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, log_db, strlen(log_db));
+      ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, log_db, (int)strlen(log_db));
       r|= op->equal(SCHEMA_DB_I, tmp_buf);
       DBUG_ASSERT(r == 0);
       /* name */
       ndb_pack_varchar(col[SCHEMA_NAME_I], tmp_buf, log_tab,
-                       strlen(log_tab));
+                       (int)strlen(log_tab));
       r|= op->equal(SCHEMA_NAME_I, tmp_buf);
       DBUG_ASSERT(r == 0);
       /* slock */
@@ -2789,7 +2789,7 @@ ndb_binlog_thread_handle_schema_event(TH
           // fall through
         case SOT_RENAME_TABLE_NEW:
         {
-          uint end= my_snprintf(&errmsg[0], MYSQL_ERRMSG_SIZE,
+          uint end= (uint)my_snprintf(&errmsg[0], MYSQL_ERRMSG_SIZE,
                                 "NDB Binlog: Skipping renaming locally "
                                 "defined table '%s.%s' from binlog schema "
                                 "event '%s' from node %d. ",
@@ -2801,7 +2801,7 @@ ndb_binlog_thread_handle_schema_event(TH
         case SOT_DROP_TABLE:
           if (schema_type == SOT_DROP_TABLE)
           {
-            uint end= my_snprintf(&errmsg[0], MYSQL_ERRMSG_SIZE,
+            uint end= (uint)my_snprintf(&errmsg[0], MYSQL_ERRMSG_SIZE,
                                   "NDB Binlog: Skipping dropping locally "
                                   "defined table '%s.%s' from binlog schema "
                                   "event '%s' from node %d. ",
@@ -3562,7 +3562,7 @@ ndb_binlog_index_table__write_rows(THD *
 
     ndb_binlog_index->field[0]->store(first->master_log_pos, true);
     ndb_binlog_index->field[1]->store(first->master_log_file,
-                                      strlen(first->master_log_file),
+                                      (uint)strlen(first->master_log_file),
                                       &my_charset_bin);
     ndb_binlog_index->field[2]->store(epoch= first->epoch, true);
     if (ndb_binlog_index->s->fields > 7)
@@ -4300,7 +4300,7 @@ parse_conflict_fn_spec(const char* confl
   {
     const st_conflict_fn_def &fn= conflict_fns[i];
 
-    uint len= strlen(fn.name);
+    uint len= (uint)strlen(fn.name);
     if (strncmp(ptr, fn.name, len))
       continue;
 
@@ -4372,7 +4372,7 @@ parse_conflict_fn_spec(const char* confl
         }
       }
 
-      uint len= end_arg - start_arg;
+      uint len= (uint)(end_arg - start_arg);
       args[no_args].type=    type;
       args[no_args].ptr=     start_arg;
       args[no_args].len=     len;
@@ -4701,9 +4701,9 @@ ndbcluster_read_replication_table(THD *t
       DBUG_PRINT("info", ("reading[%u]: %s,%s,%u", i, db, table_name, id));
       if ((_op= trans->getNdbOperation(reptab)) == NULL) abort();
       if (_op->readTuple(NdbOperation::LM_CommittedRead)) abort();
-      ndb_pack_varchar(col_db, tmp_buf, db, strlen(db));
+      ndb_pack_varchar(col_db, tmp_buf, db, (int)strlen(db));
       if (_op->equal(col_db->getColumnNo(), tmp_buf)) abort();
-      ndb_pack_varchar(col_table_name, tmp_buf, table_name, strlen(table_name));
+      ndb_pack_varchar(col_table_name, tmp_buf, table_name, (int)strlen(table_name));
       if (_op->equal(col_table_name->getColumnNo(), tmp_buf)) abort();
       if (_op->equal(col_server_id->getColumnNo(), id)) abort();
       if ((col_binlog_type_rec_attr[i]=
@@ -5478,7 +5478,7 @@ ndbcluster_create_event_ops(THD *thd, ND
   Ndb_event_data *event_data= share->event_data;
   int do_ndb_schema_share= 0, do_ndb_apply_status_share= 0;
 #ifdef HAVE_NDB_BINLOG
-  uint len= strlen(share->table_name);
+  uint len= (int)strlen(share->table_name);
 #endif
   if (!ndb_schema_share && strcmp(share->db, NDB_REP_DB) == 0 &&
       strcmp(share->table_name, NDB_SCHEMA_TABLE) == 0)
@@ -6851,7 +6851,7 @@ restart_cluster_failure:
     {
       LOG_INFO log_info;
       mysql_bin_log.get_current_log(&log_info);
-      int len=  strlen(log_info.log_file_name);
+      int len=  (uint)strlen(log_info.log_file_name);
       uint no= 0;
       if ((sscanf(log_info.log_file_name + len - 6, "%u", &no) == 1) &&
           no == 1)
@@ -7710,7 +7710,7 @@ ndbcluster_show_status_binlog(THD* thd,
     ndb_latest_epoch= injector_ndb->getLatestGCI();
     pthread_mutex_unlock(&injector_mutex);
 
-    buflen=
+    buflen= (uint)
       my_snprintf(buf, sizeof(buf),
                   "latest_epoch=%s, "
                   "latest_trans_epoch=%s, "
@@ -7723,7 +7723,7 @@ ndbcluster_show_status_binlog(THD* thd,
                   llstr(ndb_latest_handled_binlog_epoch, buff4),
                   llstr(ndb_latest_applied_binlog_epoch, buff5));
     if (stat_print(thd, ndbcluster_hton_name, ndbcluster_hton_name_length,
-                   "binlog", strlen("binlog"),
+                   "binlog", (uint)strlen("binlog"),
                    buf, buflen))
       DBUG_RETURN(TRUE);
   }

=== modified file 'sql/ha_ndbcluster_binlog.h'
--- a/sql/ha_ndbcluster_binlog.h	2011-09-07 22:50:01 +0000
+++ b/sql/ha_ndbcluster_binlog.h	2011-10-20 16:18:28 +0000
@@ -299,7 +299,7 @@ ndbcluster_show_status_binlog(THD* thd,
   the ndb binlog code
 */
 int cmp_frm(const NDBTAB *ndbtab, const void *pack_data,
-            uint pack_length);
+            size_t pack_length);
 int ndbcluster_find_all_files(THD *thd);
 
 char *ndb_pack_varchar(const NDBCOL *col, char *buf,

=== modified file 'sql/ha_ndbinfo.cc'
--- a/sql/ha_ndbinfo.cc	2011-10-17 12:43:31 +0000
+++ b/sql/ha_ndbinfo.cc	2011-10-20 16:18:28 +0000
@@ -245,7 +245,7 @@ bool ha_ndbinfo::get_error_message(int e
   if (!message)
     DBUG_RETURN(false);
 
-  buf->set(message, strlen(message), &my_charset_bin);
+  buf->set(message, (uint32)strlen(message), &my_charset_bin);
   DBUG_PRINT("exit", ("message: %s", buf->ptr()));
   DBUG_RETURN(false);
 }

=== modified file 'storage/ndb/src/common/portlib/ndb_daemon.cc'
--- a/storage/ndb/src/common/portlib/ndb_daemon.cc	2011-01-30 23:13:49 +0000
+++ b/storage/ndb/src/common/portlib/ndb_daemon.cc	2011-10-20 16:18:28 +0000
@@ -315,7 +315,7 @@ do_files(const char *pidfile_name, const
                 pidfile_name, errno);
 
   char buf[32];
-  int length = my_snprintf(buf, sizeof(buf), "%ld",
+  int length = (int)my_snprintf(buf, sizeof(buf), "%ld",
                            (long)NdbHost_GetProcessId());
   if (write(pidfd, buf, length) != length)
     return ERR1("Failed to write pid to pidfile '%s', errno: %d",

=== modified file 'storage/ndb/src/common/util/ndb_init.cpp'
--- a/storage/ndb/src/common/util/ndb_init.cpp	2011-10-20 13:01:37 +0000
+++ b/storage/ndb/src/common/util/ndb_init.cpp	2011-10-20 16:18:28 +0000
@@ -56,7 +56,7 @@ ndb_init_internal()
   {
     {
       const char* err = "ndb_init() failed - exit\n";
-      int res = (int)write(2, err, strlen(err));
+      int res = (int)write(2, err, (unsigned)strlen(err));
       (void)res;
       exit(1);
     }
@@ -79,7 +79,7 @@ ndb_init()
     if (my_init())
     {
       const char* err = "my_init() failed - exit\n";
-      int res = (int)write(2, err, strlen(err));
+      int res = (int)write(2, err, (unsigned)strlen(err));
       (void)res;
       exit(1);
     }

=== modified file 'storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp'
--- a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp	2011-10-20 13:01:37 +0000
+++ b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp	2011-10-20 16:18:28 +0000
@@ -128,7 +128,7 @@ AsyncFile::writeReq(Request * request)
           if (((i + 1) < request->par.readWrite.numberOfPages)) {
             // There are more pages to write
             // Check that offsets are consequtive
-            off_t tmp=(off_t)page_offset + request->par.readWrite.pages[i].size;
+            off_t tmp=(off_t)(page_offset+request->par.readWrite.pages[i].size);
             if (tmp != request->par.readWrite.pages[i+1].offset) {
               // Next page is not aligned with previous, not allowed
               DEBUG(ndbout_c("Page offsets are not aligned"));

=== modified file 'storage/ndb/src/kernel/vm/SimulatedBlock.cpp'
--- a/storage/ndb/src/kernel/vm/SimulatedBlock.cpp	2011-10-20 13:01:37 +0000
+++ b/storage/ndb/src/kernel/vm/SimulatedBlock.cpp	2011-10-20 16:18:28 +0000
@@ -1847,7 +1847,7 @@ SimulatedBlock::infoEvent(const char * m
   signalT.header.theSendersBlockRef      = reference();
   signalT.header.theTrace                = tTrace;
   signalT.header.theSignalId             = tSignalId;
-  signalT.header.theLength               = ((len+3)/4)+1;
+  signalT.header.theLength               = (Uint32)((len+3)/4)+1;
   
 #ifdef NDBD_MULTITHREADED
   sendlocal(m_threadId,
@@ -1892,7 +1892,7 @@ SimulatedBlock::warningEvent(const char
   signalT.header.theSendersBlockRef      = reference();
   signalT.header.theTrace                = tTrace;
   signalT.header.theSignalId             = tSignalId;
-  signalT.header.theLength               = ((len+3)/4)+1;
+  signalT.header.theLength               = (Uint32)((len+3)/4)+1;
 
 #ifdef NDBD_MULTITHREADED
   sendlocal(m_threadId,

No bundle (reason: useless for push emails).