List: Commits « Previous Message | Next Message »
From: John David Duncan    Date: September 22, 2011 6:28pm
Subject:bzr push into mysql-5.5-cluster branch (john.duncan:3528 to 3529)
View as plain text  
 3529 John David Duncan	2011-09-22
      Fix some compiler warnings in ndb/memcache

    modified:
      storage/ndb/memcache/include/Config_v1.h
      storage/ndb/memcache/include/Configuration.h
      storage/ndb/memcache/include/Scheduler.h
      storage/ndb/memcache/include/TableSpec.h
      storage/ndb/memcache/src/ClusterConnectionPool.cc
      storage/ndb/memcache/src/Config_v1.cc
      storage/ndb/memcache/src/Configuration.cc
      storage/ndb/memcache/src/DataTypeHandler.cc
      storage/ndb/memcache/src/Operation.cc
      storage/ndb/memcache/src/QueryPlan.cc
      storage/ndb/memcache/src/Record.cc
      storage/ndb/memcache/src/TabSeparatedValues.cc
      storage/ndb/memcache/src/TableSpec.cc
      storage/ndb/memcache/src/ndb_pipeline.cc
      storage/ndb/memcache/src/ndb_worker.cc
      storage/ndb/memcache/src/schedulers/S_sched.cc
      storage/ndb/memcache/src/schedulers/Stockholm.cc
      storage/ndb/memcache/src/workitem.c
      storage/ndb/memcache/unit/alloc.cc
      storage/ndb/memcache/unit/casbits.cc
      storage/ndb/memcache/unit/incr.cc
      storage/ndb/memcache/unit/test_workqueue.c
 3528 John David Duncan	2011-09-22
      Ignore compiler warnings from bundled memcached & libevent sources

    modified:
      support-files/compiler_warnings.supp
=== modified file 'storage/ndb/memcache/include/Config_v1.h'
--- a/storage/ndb/memcache/include/Config_v1.h	2011-09-12 10:05:07 +0000
+++ b/storage/ndb/memcache/include/Config_v1.h	2011-09-22 18:27:10 +0000
@@ -28,7 +28,7 @@ class config_v1 {
 public:
   /* Public Methods */
   config_v1(Configuration * cf);
-  ~config_v1();
+  virtual ~config_v1();
   bool read_configuration();
   virtual void minor_version_config() = 0;
   

=== modified file 'storage/ndb/memcache/include/Configuration.h'
--- a/storage/ndb/memcache/include/Configuration.h	2011-09-12 10:05:07 +0000
+++ b/storage/ndb/memcache/include/Configuration.h	2011-09-22 18:27:10 +0000
@@ -87,7 +87,7 @@ class Configuration {
   const KeyPrefix * getPrefixForKey(const char *key, int nkey) const;
   const KeyPrefix * getPrefixByInfo(const prefix_info_t info) const;
   const KeyPrefix * getPrefix(int id) const;                          // inlined
-  const KeyPrefix * getNextPrefixForCluster(int cluster_id, KeyPrefix *k) const;
+  const KeyPrefix * getNextPrefixForCluster(uint cluster_id, KeyPrefix *) const;
   void setPrimaryConnectString(const char *);                         // inlined
   void setServerRole(const char *);                                   // inlined
   const char * getServerRole();                                       // inlined

=== modified file 'storage/ndb/memcache/include/Scheduler.h'
--- a/storage/ndb/memcache/include/Scheduler.h	2011-09-12 10:05:07 +0000
+++ b/storage/ndb/memcache/include/Scheduler.h	2011-09-22 18:27:10 +0000
@@ -37,6 +37,7 @@ class Scheduler {
 public:
   /* Public Interface */
   Scheduler() {};
+  virtual ~Scheduler();
   
   /** init() is the called from the main thread, 
       after configuration has been read. 

=== modified file 'storage/ndb/memcache/include/TableSpec.h'
--- a/storage/ndb/memcache/include/TableSpec.h	2011-09-12 10:05:07 +0000
+++ b/storage/ndb/memcache/include/TableSpec.h	2011-09-22 18:27:10 +0000
@@ -45,17 +45,17 @@ class TableSpec {
   void setValueColumns(const char *col1, ...);
   
   /* Public instance variables */
+  int nkeycols;
+  int nvaluecols;
   const char *schema_name;
   const char *table_name;
   const char *math_column;
   const char *flags_column;
   const char *cas_column;
   const char *exp_column;
+  Uint32 static_flags;
   const char ** const key_columns;
   const char ** const value_columns;
-  int nkeycols;
-  int nvaluecols;
-  Uint32 static_flags;
 
   private:
   /* private instance variables */
@@ -91,12 +91,10 @@ inline TableSpec::TableSpec(int nkeys, i
 
 inline TableSpec::TableSpec(const char *db, const char *tab, 
                             int nkeys, int nvals) :
-                            schema_name(db),
-                            table_name(tab),
-                            nkeycols(nkeys), 
-                            nvaluecols(nvals), static_flags(0),
+                            nkeycols(nkeys), nvaluecols(nvals), 
+                            schema_name(db), table_name(tab),
                             math_column(0), flags_column(0), 
-                            cas_column(0), exp_column(0), 
+                            cas_column(0), exp_column(0), static_flags(0),
                             key_columns(new const char *[nkeys]),
                             value_columns(new const char *[nvals]) { 
   must_free.none = 1; 

=== modified file 'storage/ndb/memcache/src/ClusterConnectionPool.cc'
--- a/storage/ndb/memcache/src/ClusterConnectionPool.cc	2011-09-22 07:42:11 +0000
+++ b/storage/ndb/memcache/src/ClusterConnectionPool.cc	2011-09-22 18:27:10 +0000
@@ -18,6 +18,7 @@
  02110-1301  USA
  */
 #include <stdio.h>
+#include <assert.h>
 
 /* C++ files must define __STDC_FORMAT_MACROS in order to get PRIu64 */
 #define __STDC_FORMAT_MACROS 
@@ -142,7 +143,7 @@ Ndb_cluster_connection * ClusterConnecti
    invalidated.
 */
 ClusterConnectionPool::~ClusterConnectionPool() {
-  for(int i = 0 ; i < pool_size ; i++) {
+  for(unsigned int i = 0 ; i < pool_size ; i++) {
     if(pool_connections[i]) {
       delete pool_connections[i];
       pool_connections[i] = 0;
@@ -186,7 +187,7 @@ void ClusterConnectionPool::add_stats(co
 
   Ndb db(main_conn);
   
-  for(int i = 0 ; i < pool_size ; i++) {
+  for(unsigned int i = 0 ; i < pool_size ; i++) {
     pool_connections[i]->collect_client_stats(ndb_stats, Ndb::NumClientStatistics);
   
     for(int s = 0 ; s < Ndb::NumClientStatistics ; s++) {

=== modified file 'storage/ndb/memcache/src/Config_v1.cc'
--- a/storage/ndb/memcache/src/Config_v1.cc	2011-09-12 10:05:07 +0000
+++ b/storage/ndb/memcache/src/Config_v1.cc	2011-09-22 18:27:10 +0000
@@ -137,10 +137,10 @@ void * str_key_dup(const void *key, size
 
 config_v1::config_v1(Configuration * cf) :
   conf(*cf),
-  containers_map(0),
-  policies_map(0),
   server_role_id(-1), 
-  nclusters(0)
+  nclusters(0),
+  policies_map(0),
+  containers_map(0)
 {};
 
 config_v1::~config_v1() {
@@ -375,8 +375,6 @@ TableSpec * config_v1::get_container(cha
 
 TableSpec * config_v1::get_container_record(char *name) {
   TableSpec *container;
-  int res;
-  bool success = true;
   TableSpec spec("ndbmemcache.containers",
                  "name",  
                  "db_schema,db_table,key_columns,value_columns,flags,"
@@ -395,7 +393,7 @@ TableSpec * config_v1::get_container_rec
   
   if(tx->getNdbError().classification == NdbError::NoError) {
     char val[256];
-    char *schema, *table, *keycols, *valcols, *flags, *inccol, *cascol, *expcol;
+    char *schema, *table, *keycols, *valcols;
     
     //  `db_schema` VARCHAR(250) NOT NULL,
     //  `db_table` VARCHAR(250) NOT NULL,
@@ -759,7 +757,7 @@ int server_roles_reload_waiter(Ndb_clust
       DEBUG_PRINT("%d waiting", waiting);
       if(db.nextEvent()) { 
         if(recattr1->isNULL() == 0) {
-          int role_name_len = *(const unsigned char*) recattr1->aRef();
+          uint role_name_len = *(const unsigned char*) recattr1->aRef();
           char *role_name = recattr1->aRef() + 1;
           if(role_name_len == strlen(server_role) && 
              strcmp(server_role, role_name) == 0) { 

=== modified file 'storage/ndb/memcache/src/Configuration.cc'
--- a/storage/ndb/memcache/src/Configuration.cc	2011-09-21 08:30:46 +0000
+++ b/storage/ndb/memcache/src/Configuration.cc	2011-09-22 18:27:10 +0000
@@ -51,10 +51,10 @@ extern EXTENSION_LOGGER_DESCRIPTOR *logg
 Configuration::Configuration(Configuration *old) :
   nclusters(0), 
   nprefixes(0), 
-  config_version(CONFIG_VER_UNKNOWN),
   primary_connect_string(old->primary_connect_string),
-  primary_conn(old->primary_conn),
-  server_role(old->server_role)
+  server_role(old->server_role),
+  config_version(CONFIG_VER_UNKNOWN),
+  primary_conn(old->primary_conn)
 {
   db = new Ndb(primary_conn);
   db->init();
@@ -96,9 +96,9 @@ bool Configuration::connectToPrimary() {
 bool Configuration::openAllConnections() {
   DEBUG_ENTER_METHOD("Configuration::openAllConnections");
   Ndb_cluster_connection *conn;
-  int n_open = 0;
+  unsigned int n_open = 0;
 
-  for(int i = 0; i < nclusters ; i++) {
+  for(unsigned int i = 0; i < nclusters ; i++) {
     ClusterConnectionPool *pool = getConnectionPoolById(i);
 
     /* if the connect string is NULL, or empty, or identical to the primary 
@@ -127,8 +127,8 @@ bool Configuration::openAllConnections()
 */
 bool Configuration::prefetchDictionary() {
   DEBUG_ENTER_METHOD("Configuration::prefetchDictionary");
-  int ok = 0;
-  for(int i = 0 ; i < nprefixes ; i++) {
+  unsigned int ok = 0;
+  for(unsigned int i = 0 ; i < nprefixes ; i++) {
     /* Instantiate an Ndb and a QueryPlan, then discard them. 
        The QueryPlan constructor will make calls into NdbDictionary's 
        getTable() and getColumn() methods. 
@@ -200,9 +200,9 @@ const KeyPrefix * Configuration::getPref
 }
 
 
-const KeyPrefix * Configuration::getNextPrefixForCluster(int cluster_id, 
+const KeyPrefix * Configuration::getNextPrefixForCluster(unsigned int cluster_id, 
                                                          KeyPrefix *k) const {
-  int i = 0;
+  unsigned int i = 0;
 
   if(k) while(prefixes[i] != k && i < nprefixes) i++;
   while(i < nprefixes && prefixes[i]->info.cluster_id != cluster_id) i++;
@@ -214,7 +214,7 @@ const KeyPrefix * Configuration::getNext
 
 void Configuration::disconnectAll() {
   DEBUG_ENTER_METHOD(" Configuration::disconnectAll");
-  for(int i = 0; i < nclusters; i++) {
+  for(unsigned int i = 0; i < nclusters; i++) {
     ClusterConnectionPool *p = getConnectionPoolById(i);
     delete p;
   }

=== modified file 'storage/ndb/memcache/src/DataTypeHandler.cc'
--- a/storage/ndb/memcache/src/DataTypeHandler.cc	2011-09-12 10:05:07 +0000
+++ b/storage/ndb/memcache/src/DataTypeHandler.cc	2011-09-22 18:27:10 +0000
@@ -895,7 +895,7 @@ int dth_write32_year(Int32 value, const 
 /***** DATE & TIME HELPERS *****/
 
 typedef struct {
-  unsigned long year, month, day, hour, minute, second, fraction;
+  unsigned int year, month, day, hour, minute, second, fraction;
   bool is_negative;
 } time_helper;
 
@@ -935,7 +935,7 @@ DateTime_CopyBuffer::DateTime_CopyBuffer
   if(! too_long) {
     if(*c == '-' || *c == '+') *buf++ = *c++;  // tolerate initial + or -
 
-    for(register int i = 0 ; i < len && *c != 0 ; c++, i++ )
+    for(register unsigned int i = 0 ; i < len && *c != 0 ; c++, i++ )
       if(isdigit(*c))
         *buf++ = *c; 
     *buf = 0;
@@ -957,7 +957,7 @@ int dth_decode_date(const NdbDictionary:
   tm.month = (encoded_date >> 5 & 15); // four bits
   tm.year  = (encoded_date >> 9);
   
-  return sprintf(str, "%04d-%02d-%02d",tm.year, tm.month, tm.day) + 1;
+  return sprintf(str, "%04du-%02du-%02du",tm.year, tm.month, tm.day) + 1;
 }
 
 size_t dth_length_date(const NdbDictionary::Column *col, const void *buf) {
@@ -1001,7 +1001,7 @@ int dth_decode_time(const NdbDictionary:
   factor_HHMMSS(& tm, int_time);
   
   /* Stringify it */
-  return sprintf(str, "%s%02d:%02d:%02d", tm.is_negative ? "-" : "" ,
+  return sprintf(str, "%s%02du:%02du:%02du", tm.is_negative ? "-" : "" ,
                  tm.hour, tm.minute, tm.second);
 }
 
@@ -1012,7 +1012,6 @@ size_t dth_length_time(const NdbDictiona
 int dth_encode_time(const NdbDictionary::Column *, size_t len, 
                     size_t offset, const char *str, void *buf) {
   Int32  int_time;
-  time_helper tm = { 0,0,0,0,0,0,0, false };
   
   /* Make a safe (null-terminated) copy */
   DateTime_CopyBuffer copybuff(len, str);
@@ -1043,7 +1042,7 @@ int dth_decode_datetime(const NdbDiction
   factor_YYYYMMDD(& tm, int_date);
   
   /* Stringify it */
-  return sprintf(str, "%04d-%02d-%02d %02d:%02d:%02d", tm.year, tm.month, 
+  return sprintf(str, "%04du-%02du-%02du %02du:%02du:%02du", tm.year, tm.month, 
                  tm.day, tm.hour, tm.minute, tm.second);
  }
 
@@ -1054,7 +1053,6 @@ size_t dth_length_datetime(const NdbDict
 int dth_encode_datetime(const NdbDictionary::Column *, size_t len, 
                         size_t offset, const char *str, void *buf) {
   uint64_t int_datetime;
-  time_helper tm = { 0,0,0,0,0,0,0, false };
   
   /* Make a safe (null-terminated) copy */
   DateTime_CopyBuffer copybuff(len, str);
@@ -1087,7 +1085,7 @@ size_t dth_length_float(const NdbDiction
                         const void *buf) {
   char stack_copy[16];
   double d = (double) (* (float *) buf);
-  return snprintf(stack_copy, 16, "G", d) + 1;
+  return snprintf(stack_copy, 16, "%G", d) + 1;
 }
 
 int dth_decode_double(const NdbDictionary::Column *col, 
@@ -1124,7 +1122,7 @@ int dth_decode_decimal(const NdbDictiona
   int scale = col->getScale();
   int prec  = col->getPrecision();
   int len = scale + prec + 3;
-  int r = decimal_bin2str(buf, col->getSizeInBytes(), prec, scale, str, len);
+  decimal_bin2str(buf, col->getSizeInBytes(), prec, scale, str, len);
   return len;
 }
 

=== modified file 'storage/ndb/memcache/src/Operation.cc'
--- a/storage/ndb/memcache/src/Operation.cc	2011-09-12 10:05:07 +0000
+++ b/storage/ndb/memcache/src/Operation.cc	2011-09-22 18:27:10 +0000
@@ -35,9 +35,9 @@
 */
 
 
-Operation::Operation(QueryPlan *p, int o, char *kbuf) : plan(p), 
-                                                        op(o),
-                                                        key_buffer(kbuf)
+Operation::Operation(QueryPlan *p, int o, char *kbuf) : key_buffer(kbuf), 
+                                                        plan(p), 
+                                                        op(o)                                                        
 {
   if(op == OP_READ) record = plan->val_record;
   else if(op == OP_FLUSH) record = plan->key_record;  // scanning delete 

=== modified file 'storage/ndb/memcache/src/QueryPlan.cc'
--- a/storage/ndb/memcache/src/QueryPlan.cc	2011-09-22 02:13:18 +0000
+++ b/storage/ndb/memcache/src/QueryPlan.cc	2011-09-22 18:27:10 +0000
@@ -76,14 +76,14 @@ inline const NdbDictionary::Column *get_
 QueryPlan::QueryPlan(Ndb *my_ndb, const TableSpec *my_spec, PlanOpts opts)  : 
   initialized(0), 
   dup_numbers(false),
-  is_scan(false), 
+  db(my_ndb),
   spec(my_spec), 
-  db(my_ndb)
+  is_scan(false)
 {  
   const NdbDictionary::Column *col;
   bool op_ok = false; 
   bool last_value_col_is_int = false;
-  int  first_value_col_id;
+  int  first_value_col_id = -1;
   
   /* Get the data dictionary */
   db->setDatabaseName(spec->schema_name);
@@ -149,6 +149,7 @@ QueryPlan::QueryPlan(Ndb *my_ndb, const 
     if(i == 0) first_value_col_id = this_col_id;
     last_value_col_is_int = is_integer(table, this_col_id);
   }
+  assert(first_value_col_id >= 0);
 
   if(spec->cas_column) {                                        // CAS
     col = get_ndb_col(spec, table, spec->cas_column);
@@ -235,8 +236,8 @@ const NdbDictionary::Index * QueryPlan::
   dict->listIndexes(list, spec->table_name);
 
   /* First look for a unique index.  All columns must match. */
-  for(int i = 0; i < list.count ; i++) {
-  int nmatches, j;
+  for(unsigned int i = 0; i < list.count ; i++) {
+  unsigned int nmatches, j;
     idx = dict->getIndex(list.elements[i].name, spec->table_name);
     if(idx && idx->getType() == NdbDictionary::Index::UniqueHashIndex) {
       if(idx->getNoOfColumns() == spec->nkeycols) { 
@@ -250,7 +251,7 @@ const NdbDictionary::Index * QueryPlan::
 
   /* Then look for an ordered index.  A prefix match is OK. */
   /* Return the first suitable index we find (which might not be the best) */
-  for(int i = 0; i < list.count ; i++) {
+  for(unsigned int i = 0; i < list.count ; i++) {
     idx = dict->getIndex(list.elements[i].name, spec->table_name);
     if(idx && idx->getType() == NdbDictionary::Index::OrderedIndex) {
       if(idx->getNoOfColumns() >= spec->nkeycols) {  

=== modified file 'storage/ndb/memcache/src/Record.cc'
--- a/storage/ndb/memcache/src/Record.cc	2011-09-12 10:05:07 +0000
+++ b/storage/ndb/memcache/src/Record.cc	2011-09-22 18:27:10 +0000
@@ -213,7 +213,6 @@ bool Record::setIntValue(int id, int val
   int index = map[id];
   NumericHandler * h = handlers[index]->native_handler;
   const char * buffer = data + specs[index].offset;
-  int i = 0;
   
   if(h) {
     return (h->write_int32(value,buffer) > 0);
@@ -243,7 +242,6 @@ Uint64 Record::getUint64Value(int id, ch
 bool Record::setUint64Value(int id, Uint64 value, char *data) const {
   int index = map[id];
   const char * buffer = data + specs[index].offset;
-  Uint64 i = 0;
 
   if(specs[index].column->getType() != NdbDictionary::Column::Bigunsigned) {
     logger->log(LOG_WARNING, 0, "Operation failed - column %s must be BIGINT UNSIGNED",
@@ -252,6 +250,7 @@ bool Record::setUint64Value(int id, Uint
   }
   
   * ((Uint64 *) buffer) = value;
+  return true;
 }  
 
 

=== modified file 'storage/ndb/memcache/src/TabSeparatedValues.cc'
--- a/storage/ndb/memcache/src/TabSeparatedValues.cc	2011-09-12 10:05:07 +0000
+++ b/storage/ndb/memcache/src/TabSeparatedValues.cc	2011-09-22 18:27:10 +0000
@@ -23,7 +23,7 @@
 #include "TabSeparatedValues.h"
 
 TabSeparatedValues::TabSeparatedValues(const char *string, Uint32 max_parts, size_t length) :
-  parts(0), index(0)
+  index(0), parts(0)
 {
   size_t parsed_len = 0;
 

=== modified file 'storage/ndb/memcache/src/TableSpec.cc'
--- a/storage/ndb/memcache/src/TableSpec.cc	2011-09-22 02:13:18 +0000
+++ b/storage/ndb/memcache/src/TableSpec.cc	2011-09-22 18:27:10 +0000
@@ -78,8 +78,8 @@ int TableSpec::build_column_list(const c
 */
 TableSpec::TableSpec(const char *sqltable,
                      const char *keycols, const char *valcols) :
-  math_column(0), flags_column(0), static_flags(0), 
-  cas_column(0), exp_column(0),
+  math_column(0), flags_column(0), 
+  cas_column(0), exp_column(0), static_flags(0),
   key_columns(new const char *[MAX_KEY_COLUMNS]) ,
   value_columns(new const char *[MAX_VAL_COLUMNS]) 
 {
@@ -107,16 +107,16 @@ TableSpec::TableSpec(const char *sqltabl
 TableSpec::TableSpec(const TableSpec &t) :
   nkeycols(t.nkeycols) ,
   nvaluecols(t.nvaluecols) ,
-  key_columns(new const char *[t.nkeycols]) ,
-  value_columns(new const char *[t.nvaluecols]) ,  
   schema_name(strdup(t.schema_name)) ,
   table_name(strdup(t.table_name)) , 
-  math_column(strdup(t.math_column)) 
+  math_column(strdup(t.math_column)) ,
+  key_columns(new const char *[t.nkeycols]) ,
+  value_columns(new const char *[t.nvaluecols])
 { 
    must_free.schema_name = must_free.table_name = 1;
    must_free.special_cols = 1;
    if(nkeycols) {
-     for(int i = 0; i < nkeycols ; i++) 
+    for(int i = 0; i < nkeycols ; i++) 
        key_columns[i] = strdup(t.key_columns[i]);
      must_free.all_key_cols = 1;
    }
@@ -125,7 +125,7 @@ TableSpec::TableSpec(const TableSpec &t)
        value_columns[i] = strdup(t.value_columns[i]);
      must_free.all_val_cols = 1;
   }
-};
+}
 
 
 /* destructor */
@@ -150,7 +150,7 @@ TableSpec::~TableSpec() {
   }  
   delete[] key_columns;
   delete[] value_columns;
-};
+}
 
 
 void TableSpec::setKeyColumns(const char *col1, ...) {

=== modified file 'storage/ndb/memcache/src/ndb_pipeline.cc'
--- a/storage/ndb/memcache/src/ndb_pipeline.cc	2011-09-12 10:05:07 +0000
+++ b/storage/ndb/memcache/src/ndb_pipeline.cc	2011-09-22 18:27:10 +0000
@@ -42,7 +42,7 @@
 
 /* globals (exported; also used by workitem.c) */
 int workitem_class_id;
-int workitem_actual_inline_buffer_size;
+unsigned int workitem_actual_inline_buffer_size;
 
 /* file-scope private variables */
 static int pool_slab_class_id;
@@ -133,15 +133,12 @@ void pipeline_add_stats(ndb_pipeline *se
                         ADD_STAT add_stat, 
                         const void *cookie) {
   char key[128];
-  char val[128];
-  int klen, vlen;
-  char id;
 
   DEBUG_ENTER();
   const Configuration & conf = get_Configuration();
 
   if(strncasecmp(stat_key,"ndb",3) == 0) {
-    for(int i = 0 ; i < conf.nclusters ; i ++) {
+    for(unsigned int i = 0 ; i < conf.nclusters ; i ++) {
       sprintf(key, "cl%d", i);
       conf.getConnectionPoolById(i)->add_stats(key, add_stat, cookie);
     }
@@ -331,7 +328,7 @@ void memory_pool_free(memory_pool *pool)
   do {
     array = next;
     next = (allocation_reference *) array->pointer;
-    for(int i = 1; i < array->d.cells_idx ; i++) {  // free each block
+    for(unsigned int i = 1; i < array->d.cells_idx ; i++) {  // free each block
       allocation_reference &r = array[i];
       pipeline_free(pool->pipeline, r.pointer, r.d.slab_class);
     }
@@ -400,7 +397,7 @@ int init_slab_class(allocator_slab_class
    once the scheduler has been started, you must hold p->lock to call this. 
 */
 int malloc_new_slab(allocator_slab_class *c) {  
-  int num = c->perslab;
+  unsigned int num = c->perslab;
   void **new_list;
   char *ptr;
 
@@ -415,7 +412,7 @@ int malloc_new_slab(allocator_slab_class
   void **cur = c->list;
   ptr = (char *) malloc(ALLIGATOR_SLAB_SIZE);
   if (ptr == 0) return 0;
-  for (int i = 0; i < num; i++) {
+  for (unsigned int i = 0; i < num; i++) {
     *cur = ptr;       /* push the pointer onto the list */
     cur++;            /* bump the list forward one position */
     ptr += c->size;   /* bump the pointer to the next block */

=== modified file 'storage/ndb/memcache/src/ndb_worker.cc'
--- a/storage/ndb/memcache/src/ndb_worker.cc	2011-09-12 10:05:07 +0000
+++ b/storage/ndb/memcache/src/ndb_worker.cc	2011-09-22 18:27:10 +0000
@@ -262,7 +262,7 @@ op_status_t worker_do_write(workitem *wq
       uint64_t number;
       const int len = wqitem->cache_item->nbytes;
       char value[32];
-      for(size_t i = 0 ; i  < len ; i++) 
+      for(int i = 0 ; i  < len ; i++) 
         value[i] = * (hash_item_get_data(wqitem->cache_item) + i); 
       value[len] = 0;
       if(safe_strtoull(value, &number)) { // numeric: set the math column
@@ -519,7 +519,6 @@ void DB_callback(int result, NdbTransact
   workitem *wqitem = (workitem *) itemptr;
   ndb_pipeline * & pipeline = wqitem->pipeline;
   status_block * return_status;
-  ENGINE_ERROR_CODE io_status = ENGINE_SUCCESS;
   bool tx_did_match = false;
     
   /************** Error handling ***********/  
@@ -684,7 +683,7 @@ void rewrite_callback(int result, NdbTra
 void incr_callback(int result, NdbTransaction *tx, void *itemptr) {
   workitem *wqitem = (workitem *) itemptr;
   ndb_pipeline * & pipeline = wqitem->pipeline;
-  status_block * return_status;
+  status_block * return_status = 0;
   ENGINE_ERROR_CODE io_status = ENGINE_SUCCESS;
 
   /*  read  insert  update cr_flag response

=== modified file 'storage/ndb/memcache/src/schedulers/S_sched.cc'
--- a/storage/ndb/memcache/src/schedulers/S_sched.cc	2011-09-12 10:05:07 +0000
+++ b/storage/ndb/memcache/src/schedulers/S_sched.cc	2011-09-22 18:27:10 +0000
@@ -368,7 +368,7 @@ ENGINE_ERROR_CODE S::SchedulerWorker::sc
       
       response_code = ENGINE_EWOULDBLOCK;
       break;
-    case op_not_supported:
+   case op_not_supported:
       DEBUG_PRINT("op_status is op_not_supported");
       response_code = ENGINE_ENOTSUP;
       break;
@@ -376,6 +376,10 @@ ENGINE_ERROR_CODE S::SchedulerWorker::sc
       DEBUG_PRINT("op_status is op_overflow");
       response_code = ENGINE_E2BIG;
       break;
+    case op_async_sent:
+      DEBUG_PRINT("op_async_sent could be a bug");
+      response_code = ENGINE_FAILED;
+      break;      
     case op_failed:
       DEBUG_PRINT("op_status is op_failed");
       response_code = ENGINE_FAILED;

=== modified file 'storage/ndb/memcache/src/schedulers/Stockholm.cc'
--- a/storage/ndb/memcache/src/schedulers/Stockholm.cc	2011-09-12 10:05:07 +0000
+++ b/storage/ndb/memcache/src/schedulers/Stockholm.cc	2011-09-22 18:27:10 +0000
@@ -50,7 +50,7 @@ void Scheduler_stockholm::init(int my_th
   const Configuration & conf = get_Configuration();
 
   /* How many NDB instances are needed per cluster? */
-  for(int c = 0 ; c < conf.nclusters ; c++) {
+  for(unsigned int c = 0 ; c < conf.nclusters ; c++) {
     ClusterConnectionPool *pool = conf.getConnectionPoolById(c);
     double total_ndb_objects = conf.figureInFlightTransactions(c);
     cluster[c].nInst = (int) total_ndb_objects / nthreads;
@@ -59,7 +59,7 @@ void Scheduler_stockholm::init(int my_th
   }
   
   // Get the NDB instances. 
-  for(int c = 0 ; c < conf.nclusters ; c++) {
+  for(unsigned int c = 0 ; c < conf.nclusters ; c++) {
     cluster[c].instances = (NdbInstance**) 
       calloc(cluster[c].nInst, sizeof(NdbInstance *));
     
@@ -81,12 +81,12 @@ void Scheduler_stockholm::init(int my_th
   /* Hoard a transaction (an API connect record) for each Ndb object.  This
      first call to startTransaction() will send TC_SEIZEREQ and wait for a 
      reply, but later at runtime startTransaction() should return immediately.
-     Also, for each NDB instance, ore-build the QueryPlan for the default key prefix.
+     Also, for each NDB instance, pre-build the QueryPlan for the default key prefix.
      TODO? Start one tx on each data node.
   */
   QueryPlan *plan;
-  const KeyPrefix *default_prefix = conf.getDefaultPrefix();
-  for(int c = 0 ; c < conf.nclusters ; c++) {
+  const KeyPrefix *default_prefix = conf.getDefaultPrefix();  // TODO: something
+  for(unsigned int c = 0 ; c < conf.nclusters ; c++) {
     const KeyPrefix *prefix = conf.getNextPrefixForCluster(c, NULL); 
     if(prefix) {
       NdbTransaction ** txlist;
@@ -109,7 +109,7 @@ void Scheduler_stockholm::init(int my_th
      The engine thread will add items to this queue, and the commit thread will 
      consume them. 
   */
-  for(int c = 0 ; c < conf.nclusters; c++) {
+  for(unsigned int c = 0 ; c < conf.nclusters; c++) {
     cluster[c].queue = (struct workqueue *) malloc(sizeof(struct workqueue));
     workqueue_init(cluster[c].queue, 8192, 1);
   }  
@@ -140,11 +140,11 @@ void Scheduler_stockholm::shutdown() {
   const Configuration & conf = get_Configuration();
 
   /* Shut down the workqueues */
-  for(int c = 0 ; c < conf.nclusters; c++)
+  for(unsigned int c = 0 ; c < conf.nclusters; c++)
     workqueue_abort(cluster[c].queue);
   
   /* Close all of the Ndbs */
-  for(int c = 0 ; c < conf.nclusters; c++) {
+  for(unsigned int c = 0 ; c < conf.nclusters; c++) {
     for(int i = 0 ; i < cluster[c].nInst ; i++) {
       delete cluster[c].instances[i];
     }
@@ -231,7 +231,7 @@ void Scheduler_stockholm::add_stats(cons
                                     const void * cookie) {
   char key[128];
   char val[128];
-  int klen, vlen, p;
+  int klen, vlen;
   const Configuration & conf = get_Configuration();
 
   if(strncasecmp(stat_key, "reconf", 6) == 0) {
@@ -239,7 +239,7 @@ void Scheduler_stockholm::add_stats(cons
     return;
   }
   
-  for(int c = 0 ; c < conf.nclusters; c++) {  
+  for(unsigned int c = 0 ; c < conf.nclusters; c++) {
     klen = sprintf(key, "pipeline_%d_cluster_%d_commit_cycles", pipeline->id, c);
     vlen = sprintf(val, "%"PRIu64, cluster[c].stats.cycles);
     add_stat(key, klen, val, vlen, cookie);

=== modified file 'storage/ndb/memcache/src/workitem.c'
--- a/storage/ndb/memcache/src/workitem.c	2011-09-12 10:05:07 +0000
+++ b/storage/ndb/memcache/src/workitem.c	2011-09-22 18:27:10 +0000
@@ -29,7 +29,7 @@
 
 
 extern int workitem_class_id;   /* from ndb_pipeline.cc */
-extern int workitem_actual_inline_buffer_size;  /* from ndb_pipeline.cc */
+extern unsigned int workitem_actual_inline_buffer_size;  /* from ndb_pipeline.cc */
 
 const char * workitem_get_key_suffix(workitem *item) {
   return item->key + (item->base.nkey - item->base.nsuffix);

=== modified file 'storage/ndb/memcache/unit/alloc.cc'
--- a/storage/ndb/memcache/unit/alloc.cc	2011-09-12 10:05:07 +0000
+++ b/storage/ndb/memcache/unit/alloc.cc	2011-09-22 18:27:10 +0000
@@ -32,7 +32,7 @@ int run_allocator_test(QueryPlan *, int 
   
   memory_pool *p1 = pipeline_create_memory_pool(p);
   int sz = 13;
-  int tot = 0;
+  uint tot = 0;
   void *v1, *v2;
   for(int i = 0 ; i < 25 ; i++) {
     v1 = memory_pool_alloc(p1, sz);     tot += sz;
@@ -40,10 +40,10 @@ int run_allocator_test(QueryPlan *, int 
     sz = (int) (sz * 1.25);
   }
 
-  detail(v, "Total requested: %d  granted: %d \n", tot, p1->size);
+  detail(v, "Total requested: %d  granted: %lu \n", tot, p1->size);
   require(p1->size >= tot);
   /* Get total before freeing the pool */
-  int old_total = p1->size + p1->total;
+  uint old_total = p1->size + p1->total;
   memory_pool_free(p1);
   /* The new total must equal the old_total */
   require(old_total == p1->total);
@@ -56,7 +56,7 @@ int run_allocator_test(QueryPlan *, int 
     int free_slot   = p->alligator[i].free_idx;
     size_t alloc_sz = p->alligator[i].total;
     
-    detail(v, "Class %d idx %d used %d \n", i, list_size - free_slot, alloc_sz);
+    detail(v, "Class %d idx %d used %lu \n", i, list_size - free_slot, alloc_sz);
     /* After we destroy the pool, every slab must have 0 allocated blocks */
     require(list_size - free_slot == 0);
     /* But it must have a non-zero size, indicating that it has been used */

=== modified file 'storage/ndb/memcache/unit/casbits.cc'
--- a/storage/ndb/memcache/unit/casbits.cc	2011-09-12 10:05:07 +0000
+++ b/storage/ndb/memcache/unit/casbits.cc	2011-09-22 18:27:10 +0000
@@ -54,6 +54,6 @@ void worker_set_cas(int verbose, Uint64 
     did_inc = atomic_cmp_swap_int(& engine_cas_lo, cas_lo, cas_lo + 1);
   } while(! did_inc);
   *cas = Uint64(cas_lo) | (Uint64(cas_hi) << 32);
-  detail(verbose, "%ull \n", (unsigned long long) *cas);
+  detail(verbose, "%llu \n", (unsigned long long) *cas);
 }
 

=== modified file 'storage/ndb/memcache/unit/incr.cc'
--- a/storage/ndb/memcache/unit/incr.cc	2011-09-12 10:05:07 +0000
+++ b/storage/ndb/memcache/unit/incr.cc	2011-09-22 18:27:10 +0000
@@ -72,7 +72,6 @@ int do_incr(int v, QueryPlan *plan, cons
             bool create, bool update, Uint64 *val) {
   int r;
   char key[50];
-  char value[50];
   char ndbkeybuffer[300];
   char ndbrowbuffer1[8192];
   char ndbrowbuffer2[8192];
@@ -135,7 +134,6 @@ int do_incr(int v, QueryPlan *plan, cons
   
   // NdbOperation #3:  update (interpreted)
   if (update) {
-    int i;
     NdbOperation::OperationOptions options;
     const Uint32 program_size = 10;
     Uint32 program[program_size];

=== modified file 'storage/ndb/memcache/unit/test_workqueue.c'
--- a/storage/ndb/memcache/unit/test_workqueue.c	2011-09-12 10:05:07 +0000
+++ b/storage/ndb/memcache/unit/test_workqueue.c	2011-09-22 18:27:10 +0000
@@ -150,7 +150,7 @@ void * producer_thread(void *arg) {
   struct workqueue *queue = testinfo->q;
   int batchsize = testinfo->producer_batch_size;
   int sleeptime = testinfo->producer_median_sleep;
-  int iterations = testinfo->iterations + 1;
+  unsigned int iterations = testinfo->iterations + 1;
   
   /* Generate consecutive integers, in random batches.  And sleep for random
      amounts of time between them.  Put them on the queue. 
@@ -240,7 +240,7 @@ int sleep_microseconds(int usec) {
 
 
 void express_nanosec(Uint64 ns) {
-  char *units[4] = { "ns", "us", "ms", "s" };
+  const char *units[4] = { "ns", "us", "ms", "s" };
   int p;
   for(p = 0; ns > 1000 && p < 4; ns /= 1000, p++);
   

No bundle (reason: useless for push emails).
Thread
bzr push into mysql-5.5-cluster branch (john.duncan:3528 to 3529) — John David Duncan, 23 Sep