List: Commits « Previous Message | Next Message »
From: John David Duncan  Date: February 8 2013 5:21pm
Subject:bzr push into mysql-5.5-cluster-7.2 branch (john.duncan:4167 to 4168)
View as plain text  
 4168 John David Duncan	2013-02-08 [merge]
      Local merge from 7.2.11-bugs

    modified:
      mysql-test/lib/My/Memcache.pm
      storage/ndb/memcache/include/Operation.h
      storage/ndb/memcache/include/ndbmemcache_global.h
      storage/ndb/memcache/include/workitem.h
      storage/ndb/memcache/sandbox.sh.in
      storage/ndb/memcache/src/ExternalValue.cc
      storage/ndb/memcache/src/Operation.cc
      storage/ndb/memcache/src/Record.cc
      storage/ndb/memcache/src/ndb_worker.cc
      storage/ndb/memcache/src/schedulers/S_sched.cc
      storage/ndb/memcache/src/schedulers/Stockholm.cc
      storage/ndb/memcache/src/workitem.c
 4167 Martin Skold	2013-02-08 [merge]
      Merge from 7.1

    modified:
      mysql-test/suite/ndb/r/ndb_database.result
      mysql-test/suite/ndb/r/ndb_multi.result
      mysql-test/suite/ndb/t/ndb_database.test
      mysql-test/suite/ndb/t/ndb_multi.test
=== modified file 'mysql-test/lib/My/Memcache.pm'
--- a/mysql-test/lib/My/Memcache.pm	2012-10-12 21:48:24 +0000
+++ b/mysql-test/lib/My/Memcache.pm	2013-02-08 04:21:39 +0000
@@ -481,6 +481,7 @@ sub get_binary_response {
     $body .= $buf;
   }
   $self->{error} = $error_message{$status};
+  $self->{cas} = ($cas_hi * (2 ** 32)) + $cas_lo;
 
   # Packet structure is: header .. extras .. key .. value 
   my $l = $extra_len + $key_len;

=== modified file 'storage/ndb/memcache/include/Operation.h'
--- a/storage/ndb/memcache/include/Operation.h	2012-07-18 06:54:14 +0000
+++ b/storage/ndb/memcache/include/Operation.h	2013-02-08 04:21:39 +0000
@@ -57,11 +57,15 @@ private:
 
   /* Private methods */
   void set_default_record();
+  bool setFieldsInRow(int offset, const char *type, int, const char *, size_t);
   
 public: 
-  /* Public Methods: */
+  // Constructors
   Operation(QueryPlan *p, int o = 1, char *kbuf = 0);
-  Operation(workitem *);
+  Operation(workitem *, Uint32 saved_row_mask = 0);
+
+  // Public Methods
+  void save_row_mask(Uint32 * loc);
   
   // Select columns for reading
   void readSelectedColumns();
@@ -81,6 +85,7 @@ public: 
   void setNullBits();
   void clearNullBits();
   bool setKeyFieldsInRow(int nparts, const char *key_str, size_t key_str_len);
+  bool setValueFieldsInRow(int nparts, const char *val_str, size_t val_str_len);
   bool setColumn(int id, const char *strval, size_t strlen);
   bool setColumnInt(int id, int value);
   bool setColumnBigUnsigned(int id, Uint64 value);
@@ -114,6 +119,7 @@ public: 
 
   // write
   const NdbOperation *writeTuple(NdbTransaction *tx);
+  const NdbOperation *insertRow(NdbTransaction *tx);
   const NdbOperation *insertTuple(NdbTransaction *tx, 
                                   NdbOperation::OperationOptions *options = 0);
   const NdbOperation *updateTuple(NdbTransaction *tx,
@@ -130,6 +136,10 @@ public: 
 
 /* ================= Inline methods ================= */
 
+inline void Operation::save_row_mask(Uint32 * loc) {
+  memcpy(loc, row_mask, 4);
+}
+
 /* Select columns for reading */
 
 inline void Operation::readSelectedColumns() {
@@ -156,8 +166,8 @@ inline void Operation::clearKeyNullBits(
   plan->key_record->clearNullBits(key_buffer);
 }
 
-inline bool Operation::setKeyPart(int id, const char *strval, size_t strlen) {
-  int s = plan->key_record->encode(id, strval, strlen, key_buffer, key_mask);
+inline bool Operation::setKeyPart(int id, const char *strval, size_t str_len) {
+  int s = plan->key_record->encode(id, strval, str_len, key_buffer, key_mask);
   return (s > 0);
 }
 
@@ -205,6 +215,15 @@ inline void Operation::setColumnNotNull(
   record->setNotNull(id, buffer, row_mask);
 }
 
+inline bool Operation::setKeyFieldsInRow(int nparts, const char *dbkey, size_t key_len) {
+  return setFieldsInRow(COL_STORE_KEY, "key", nparts, dbkey, key_len);
+}
+
+inline bool Operation::setValueFieldsInRow(int nparts, const char *dbval, size_t val_len) {
+  return setFieldsInRow(COL_STORE_VALUE, "value", nparts, dbval, val_len);
+}
+
+
 /* Methods for reading columns from the response */
 
 inline int Operation::nValues() const {
@@ -257,6 +276,10 @@ inline const NdbOperation * Operation::w
                         plan->row_record->ndb_record, buffer, row_mask);
 }
 
+inline const NdbOperation * Operation::insertRow(NdbTransaction *tx) { 
+  return tx->insertTuple(plan->row_record->ndb_record, buffer, row_mask);
+}
+
 inline const NdbOperation * 
   Operation::insertTuple(NdbTransaction *tx,
                          NdbOperation::OperationOptions * options) { 

=== modified file 'storage/ndb/memcache/include/ndbmemcache_global.h'
--- a/storage/ndb/memcache/include/ndbmemcache_global.h	2011-09-12 10:05:07 +0000
+++ b/storage/ndb/memcache/include/ndbmemcache_global.h	2013-02-08 04:21:39 +0000
@@ -59,6 +59,7 @@ enum {  
 typedef enum {
   op_not_supported,
   op_failed,
+  op_bad_key,
   op_overflow,
   op_async_prepared,
   op_async_sent

=== modified file 'storage/ndb/memcache/include/workitem.h'
--- a/storage/ndb/memcache/include/workitem.h	2012-03-05 17:51:21 +0000
+++ b/storage/ndb/memcache/include/workitem.h	2013-02-08 15:54:41 +0000
@@ -43,7 +43,7 @@ typedef struct workitem {
     unsigned complete    : 1;  /*! is this operation finished? */
     unsigned _unused_2   : 2;  /*! */
     unsigned reschedule  : 1;  /*! inform scheduler to send and poll again */
-    unsigned cas_owner   : 1;  /*! set if this engine owns the CAS ID */
+    unsigned cas_owner   : 1;  /*! set if the NDB engine must create a CAS ID */
   } base;
   unsigned int id;
   struct workitem *previous;   /*! used to chain workitems in multi-key get */

=== modified file 'storage/ndb/memcache/sandbox.sh.in'
--- a/storage/ndb/memcache/sandbox.sh.in	2011-12-11 07:31:26 +0000
+++ b/storage/ndb/memcache/sandbox.sh.in	2013-02-08 04:35:34 +0000
@@ -1,51 +1,78 @@
 #!/bin/sh
 
-prefix=@CMAKE_INSTALL_PREFIX@
+sourcedir=@PROJECT_SOURCE_DIR@
+builddir=@PROJECT_BINARY_DIR@
+installdir=@CMAKE_INSTALL_PREFIX@
+
+# Install Trees:
 bindir=@INSTALL_BINDIR@
 libexecdir=@INSTALL_SBINDIR@
 libdir=@INSTALL_LIBDIR@
 scriptsdir=@INSTALL_SCRIPTDIR@
-memcachedir=@MEMCACHED_ROOT_DIR@
-sourcetree=@CMAKE_CURRENT_SOURCE_DIR@
-installtree=@CMAKE_INSTALL_PREFIX@/memcache-api
-memcached_binary=@MEMCACHED_BIN_PATH@
+sharedir=@INSTALL_MYSQLSHAREDIR@
+memcachedbin=@MEMCACHED_BIN_PATH@
+
+SOURCE_DIR=$sourcedir
+BUILD_DIR=$builddir
+INSTALL_DIR=$installdir
 
 MYSELF=`who am i | awk '{print $1}'`
-MYSQL_PREFIX=$prefix
-MYSQL_BIN=$prefix/$bindir
-MYSQL_LIBEXEC=$prefix/$libexecdir
-MYSQL_SCRIPTS=$prefix/$scriptsdir
-MYSQL_LIB=$prefix/$libdir
-MEMCACHE_BASE=$memcachedir
-SOURCE_TREE=$sourcetree
-HOME_BASE=$sourcetree   # fallback to source tree 
+WORKING_DIR=`pwd`
+
+if test -w $WORKING_DIR 
+ then
+   HOME_BASE=$WORKING_DIR
+ else
+   HOME_BASE=$HOME 
+fi
 
-if test `pwd` = $SOURCE_TREE
+# In an in-source build, BUILD_DIR = SOURCE_DIR.
+# Try BUILD_DIR first.  If it does not exist, we assume this is an install,
+# so we use INSTALL_DIR.
+if test -d $BUILD_DIR
  then
-   # If we are in the source tree, use the memcached and engine there
-   memcached_binary=$MEMCACHE_BASE/memcached
-   NDB_ENGINE_SO=$SOURCE_TREE/ndb_engine.so
+  # Use build dir
+  MEMCACHED_DIR=$BUILD_DIR/storage/ndb/memcache/extra/memcached
+  MGM_CLIENT_DIR=$BUILD_DIR/storage/ndb/src/mgmclient
+  MGM_SERVER_DIR=$BUILD_DIR/storage/ndb/src/mgmsrv
+  NDBD_DIR=$BUILD_DIR/storage/ndb/src/kernel
+  MYSQL_CLIENT_DIR=$BUILD_DIR/client
+  MYSQL_SERVER_DIR=$BUILD_DIR/scripts
+  ENGINE_SO_DIR=$BUILD_DIR/storage/ndb/memcache
+  METADATA_DIR=$BUILD_DIR/scripts/
+  SCRIPTS_DIR=$INSTALL_DIR/$scriptsdir    # See NOTE below
  else
-   test -d $installtree && HOME_BASE=$installtree
-   NDB_ENGINE_SO=$MYSQL_LIB/ndb_engine.so
+  # Use install dir
+  MEMCACHED_DIR=`dirname $memcachedbin`
+  MGM_CLIENT_DIR=$INSTALL_DIR/$libexecdir
+  MGM_SERVER_DIR=$INSTALL_DIR/$libexecdir
+  NDBD_DIR=$INSTALL_DIR/$libexecdir
+  MYSQL_CLIENT_DIR=$INSTALL_DIR/$bindir
+  MYSQL_SERVER_DIR=$INSTALL_DIR/$libexecdir
+  ENGINE_SO_DIR=$INSTALL_DIR/$libdir
+  METADATA_DIR=$sharedir/memcache-api
+  SCRIPTS_DIR=$INSTALL_DIR/$scriptsdir
 fi
 
+## NOTE.
+##  mysql_install_db only works from an installed copy.
+##  mtr fixes this by reimplementing mysql_install_db internally.
+##  Our solution for now is to hope we find a copy in the install_dir
+ 
 test_paths() {
-  test_path $MYSQL_BIN ndb_mgm
-  test_path $MYSQL_BIN mysql
-  test_path $MYSQL_BIN mysqladmin
-  test_path $MYSQL_LIBEXEC ndb_mgmd
-  test_path $MYSQL_LIBEXEC ndbd
-  test_path $MYSQL_LIBEXEC mysqld_safe
-  test_path $MYSQL_SCRIPTS mysql_install_db
-  if [ ! -x $memcached_binary ] 
-    then 
-      echo "Cannot execute $memcached_binary"
-      exit
-  fi
+  test_exec_path $MGM_CLIENT_DIR ndb_mgm
+  test_exec_path $MGM_SERVER_DIR ndb_mgmd
+  test_exec_path $MYSQL_CLIENT_DIR mysql
+  test_exec_path $MYSQL_CLIENT_DIR mysqladmin
+  test_exec_path $MYSQL_SERVER_DIR mysqld_safe
+  test_exec_path $NDBD_DIR ndbd
+  test_exec_path $MEMCACHED_DIR memcached
+  test_exec_path $SCRIPTS_DIR mysql_install_db
+  test_read_path $ENGINE_SO_DIR ndb_engine.so
+  test_read_path $METADATA_DIR ndb_memcache_metadata.sql
 }
 
-test_path() {
+test_exec_path() {
   if [ ! -x $1/$2 ] 
    then
     echo "Fatal error: cannot execute $2 at $1"
@@ -53,6 +80,14 @@ test_path() {
   fi
 }
 
+test_read_path() {
+  if [ ! -r $1/$2 ] 
+   then
+    echo "Fatal error: cannot read $2 at $1"
+    exit
+  fi
+}
+
 build_dirs() {
   echo Creating sandbox
   mkdir $HOME_BASE/sandbox
@@ -100,33 +135,48 @@ write_cluster_ini() {
 }
 
 do_install_db() {
-  $MYSQL_SCRIPTS/mysql_install_db \
-    --basedir=$MYSQL_PREFIX --datadir=$HOME_BASE/sandbox/data \
-    --skip-name-resolve --user=$MYSELF > /dev/null
-  if test ! -d sandbox/data/mysql 
-    then echo "Failed: mysql_install_db did not work." && exit
+  OUTFILE=`mktemp`
+  $SCRIPTS_DIR/mysql_install_db \
+    --basedir=$INSTALL_DIR --datadir=$HOME_BASE/sandbox/data \
+    --skip-name-resolve --user=$MYSELF > $OUTFILE
+  if test ! -d $HOME_BASE/sandbox/data/mysql 
+    then echo "Failed: mysql_install_db did not work:"
+    cat $OUTFILE
+    rm $OUTFILE
+    exit    
     else echo "Created MySQL System Tables"
+    rm $OUTFILE
   fi
 }
 
+wait_for_mysql_ready() {
+  while ! $MYSQL_CLIENT_DIR/mysqladmin -s ping 
+    do
+      sleep 1
+      echo waiting
+  done
+}
+
+
 start_mgm_server() {
   INITIAL="--initial"
-  test -f sandbox/ndb-config/ndb_1_config.bin.1 && INITIAL=""
+  test -f $HOME_BASE/sandbox/ndb-config/ndb_1_config.bin.1 && INITIAL=""
 
-  $MYSQL_LIBEXEC/ndb_mgmd -f $HOME_BASE/sandbox/cluster.ini \
+  $MGM_SERVER_DIR/ndb_mgmd -f $HOME_BASE/sandbox/cluster.ini \
     --config-dir=$HOME_BASE/sandbox/ndb-config $INITIAL
     
   sleep 5
 }
 
 start_ndbd() {
-  $MYSQL_LIBEXEC/ndbd 
+  $NDBD_DIR/ndbd 
   sleep 5
 }
 
 start_mysql_server() {
-  $MYSQL_LIBEXEC/mysqld_safe --defaults-file=$HOME_BASE/sandbox/my.cnf &
-  sleep 5
+  $MYSQL_SERVER_DIR/mysqld_safe --defaults-file=$HOME_BASE/sandbox/my.cnf &
+  sleep 3
+  wait_for_mysql_ready
 }
 
 mysqld_is_running() {  
@@ -154,16 +204,16 @@ test_pid() {
 
 load_metadata() {
   echo Loading NDB Memcache configuration data
-  DDL_SCRIPT=$SOURCE_TREE/scripts/ndb_memcache_metadata.sql
-  $MYSQL_BIN/mysql --connect-timeout=10 -u root < $DDL_SCRIPT || TRY_AGAIN=1
-  test "$TRY_AGAIN" = "1" && $MYSQL_BIN/mysql --connect-timeout=10 \
+  DDL_SCRIPT=$METADATA_DIR/ndb_memcache_metadata.sql
+  $MYSQL_CLIENT_DIR/mysql --connect-timeout=10 -u root < $DDL_SCRIPT || TRY_AGAIN=1
+  test "$TRY_AGAIN" = "1" && $MYSQL_CLIENT_DIR/mysql --connect-timeout=10 \
     -u root < $DDL_SCRIPT
 }
 
 start_memcached() {
-    $memcached_binary -d -v \
+    $MEMCACHED_DIR/memcached -d -v \
     -P $HOME_BASE/sandbox/memcached.pid \
-    -E $NDB_ENGINE_SO $1 $2
+    -E $ENGINE_SO_DIR/ndb_engine.so $1 $2
 }
 
 stop_memcached() {
@@ -172,18 +222,18 @@ stop_memcached() {
 }
 
 stop_mysqld() {
-  $MYSQL_BIN/mysqladmin -u root shutdown
+  $MYSQL_CLIENT_DIR/mysqladmin -u root shutdown
 }
 
 stop_mgm_server() {
-  $MYSQL_BIN/ndb_mgm -e shutdown
+  $MGM_CLIENT_DIR/ndb_mgm -e shutdown
 }
 
 final_message() {
   sleep 2
   echo
   echo "Sandbox directory is $HOME_BASE/sandbox"
-  echo "Memcached is $memcached_binary"
+  echo "Memcached is $MEMCACHED_DIR/memcached"
   echo 
   echo "Use \"sh sandbox.sh stop\" to stop memcached & MySQL Cluster"
   echo 

=== modified file 'storage/ndb/memcache/src/ExternalValue.cc'
--- a/storage/ndb/memcache/src/ExternalValue.cc	2012-07-18 06:54:14 +0000
+++ b/storage/ndb/memcache/src/ExternalValue.cc	2013-02-08 04:21:39 +0000
@@ -159,7 +159,7 @@ op_status_t ExternalValue::do_read_heade
   op.readColumn(COL_STORE_CAS);
   
   if(! setupKey(item, op))
-    return op_overflow;
+    return op_bad_key;
   
   workitem_allocate_rowbuffer_1(item, op.requiredBuffer());
   op.buffer = item->row_buffer_1;

=== modified file 'storage/ndb/memcache/src/Operation.cc'
--- a/storage/ndb/memcache/src/Operation.cc	2012-07-20 00:10:07 +0000
+++ b/storage/ndb/memcache/src/Operation.cc	2013-02-08 04:21:39 +0000
@@ -43,11 +43,17 @@ Operation::Operation(QueryPlan *p, int o
   set_default_record();
 }
 
-Operation::Operation(workitem *i) : key_buffer(i->ndb_key_buffer), 
-                                    plan(i->plan),
-                                    op(i->base.verb)
+Operation::Operation(workitem *i, Uint32 mask) : key_buffer(i->ndb_key_buffer), 
+                                                 plan(i->plan),
+                                                 op(i->base.verb)
 {
   set_default_record();
+  if(mask) {
+    row_mask[0] = mask & 0x000000FF;
+    row_mask[1] = mask & 0x0000FF00;
+    row_mask[2] = mask & 0x00FF0000;
+    row_mask[3] = mask & 0xFF000000;
+  }
 }
 
 
@@ -137,27 +143,28 @@ bool Operation::setKey(int nparts, const
 }
 
 
-bool Operation::setKeyFieldsInRow(int nparts, const char *dbkey, size_t key_len ) {
+bool Operation::setFieldsInRow(int offset, const char * desc,
+                               int nparts, const char *val, size_t len ) {
   bool r = true;
   
   if(nparts > 1) {
-    TabSeparatedValues tsv(dbkey, nparts, key_len);
+    TabSeparatedValues tsv(val, nparts, len);
     int idx = 0;
     do {
       if(tsv.getLength()) {
-        DEBUG_PRINT("Set key part %d [%.*s]", idx, tsv.getLength(), tsv.getPointer());
-        if(! setColumn(COL_STORE_KEY+idx, tsv.getPointer(), tsv.getLength()))
+        DEBUG_PRINT("Set %s part %d [%.*s]", desc, idx, tsv.getLength(), tsv.getPointer());
+        if(! setColumn(offset+idx, tsv.getPointer(), tsv.getLength()))
           return false;
       }
       else {
-        DEBUG_PRINT("Set key part NULL: %d ", idx);
-        setColumnNull(COL_STORE_KEY+idx);
+        DEBUG_PRINT("Set %s part NULL: %d ", desc, idx);
+        setColumnNull(offset+idx);
       }
       idx++;
     } while (tsv.advance());
   }
   else {
-    r = setColumn(COL_STORE_KEY, dbkey, key_len);
+    r = setColumn(offset, val, len);
   }
   return r;
 }

=== modified file 'storage/ndb/memcache/src/Record.cc'
--- a/storage/ndb/memcache/src/Record.cc	2012-04-14 00:53:04 +0000
+++ b/storage/ndb/memcache/src/Record.cc	2013-02-08 04:21:39 +0000
@@ -323,13 +323,21 @@ bool Record::setUint64Value(int id, Uint
     nullmapSetNotNull(idx, data);
   char * buffer = data + specs[idx].offset;
 
-  if(specs[idx].column->getType() != NdbDictionary::Column::Bigunsigned) {
-    logger->log(LOG_WARNING, 0, "Operation failed - column %s must be BIGINT UNSIGNED",
-                specs[idx].column->getName());
-    return false;
+  /* If the column is a BIGINT and the value can be written natively, encode it.
+     Otherwise write out a string value and let the DataTypeHandler do the job.
+  */
+  if((specs[idx].column->getType() == NdbDictionary::Column::Bigunsigned) 
+     || (specs[idx].column->getType() == NdbDictionary::Column::Bigint 
+          && ((Int64) value) > 0))
+  {
+    STORE_FOR_ARCHITECTURE(Uint64, value, buffer);
+  }
+  else {
+    char stringified[32];
+    int len = sprintf(stringified, "%llu", value);
+    handlers[idx]->writeToNdb(specs[idx].column, len, stringified, buffer);
   }
   
-  STORE_FOR_ARCHITECTURE(Uint64, value, buffer);
   return true;
 }  
 

=== modified file 'storage/ndb/memcache/src/ndb_worker.cc'
--- a/storage/ndb/memcache/src/ndb_worker.cc	2012-10-12 04:54:41 +0000
+++ b/storage/ndb/memcache/src/ndb_worker.cc	2013-02-08 17:18:08 +0000
@@ -73,7 +73,7 @@
 
 class WorkerStep1 {
 public:
-  WorkerStep1(struct workitem *, bool has_server_cas);
+  WorkerStep1(struct workitem *);
   op_status_t do_append();           // begin an append/prepend operation
   op_status_t do_read();             // begin a read operation 
   op_status_t do_write();            // begin a SET/ADD/REPLACE operation
@@ -83,7 +83,6 @@ public:
 private:
   /* Private member variables */
   workitem *wqitem;
-  bool server_cas;
   NdbTransaction *tx;
   QueryPlan * &plan;
 
@@ -221,8 +220,7 @@ void worker_set_ext_flag(workitem *item)
    Returns true if executeAsynchPrepare() has been called on the item.
 */
 op_status_t worker_prepare_operation(workitem *newitem) {
-  bool server_cas = (newitem->prefix_info.has_cas_col && newitem->cas);
-  WorkerStep1 worker(newitem, server_cas);
+  WorkerStep1 worker(newitem);
   op_status_t r;
 
   worker_set_ext_flag(newitem);
@@ -262,12 +260,16 @@ op_status_t worker_prepare_operation(wor
 
 /***************** STEP ONE OPERATIONS ***************************************/
 
-WorkerStep1::WorkerStep1(workitem *newitem, bool do_server_cas) :
+WorkerStep1::WorkerStep1(workitem *newitem) :
   wqitem(newitem), 
-  server_cas(do_server_cas), 
   tx(0),
-  plan(newitem->plan)
-{};
+  plan(newitem->plan) 
+{
+  /* Set cas_owner in workitem.
+     (Further refine the semantics of this.  Does it depend on do_mc_read?)
+  */  
+    newitem->base.cas_owner = (newitem->prefix_info.has_cas_col);
+};
 
 
 op_status_t WorkerStep1::do_delete() {
@@ -287,12 +289,10 @@ op_status_t WorkerStep1::do_delete() {
   
   tx = op.startTransaction(wqitem->ndb_instance->db);
   
-  if(server_cas && * wqitem->cas) {
-    // ndb_op = op.deleteTupleCAS(tx, & options);  
-  }
-  else {
-    ndb_op = op.deleteTuple(tx);    
-  }
+  /* Here we could also support op.deleteTupleCAS(tx, & options)
+     but the protocol is ambiguous about whether this is allowed.
+  */ 
+  ndb_op = op.deleteTuple(tx);    
   
   /* Check for errors */
   if(ndb_op == 0) {
@@ -318,7 +318,7 @@ op_status_t WorkerStep1::do_write() {
   }
   
   uint64_t cas_in = *wqitem->cas;                  // read old value
-  if(server_cas) {
+  if(wqitem->base.cas_owner) {
     worker_set_cas(wqitem->pipeline, wqitem->cas);    // generate a new value
     hash_item_set_cas(wqitem->cache_item, * wqitem->cas); // store it
   }
@@ -363,7 +363,7 @@ op_status_t WorkerStep1::do_write() {
     if(! op_ok) return op_overflow;
   }
   
-  if(server_cas) {
+  if(wqitem->base.cas_owner) {
     op.setColumnBigUnsigned(COL_STORE_CAS, * wqitem->cas);   // the cas
   }
   
@@ -419,7 +419,7 @@ op_status_t WorkerStep1::do_write() {
     ndb_op = op.insertTuple(tx);
   }
   else if(wqitem->base.verb == OPERATION_CAS) {    
-    if(server_cas) {
+    if(wqitem->base.cas_owner) {
       /* NdbOperation.hpp says: "All data is copied out of the OperationOptions 
        structure (and any subtended structures) at operation definition time."      
        */
@@ -594,7 +594,7 @@ op_status_t WorkerStep1::do_math() {
     op2.setKeyFieldsInRow(plan->spec->nkeycols, dbkey, wqitem->base.nsuffix);
     
     /* CAS */
-    if(server_cas) {
+    if(wqitem->base.cas_owner) {
       op1.readColumn(COL_STORE_CAS);
       op2.setColumnBigUnsigned(COL_STORE_CAS, * wqitem->cas);
       op3.setColumnBigUnsigned(COL_STORE_CAS, * wqitem->cas);
@@ -1047,10 +1047,13 @@ void worker_finalize_read(NdbTransaction
 
 void worker_finalize_write(NdbTransaction *tx, workitem *wqitem) {
   if(wqitem->prefix_info.do_mc_write) {
-    /* If the write was succesful, update the local cache */
+    /* If the write was successful, update the local cache */
     /* Possible bugs here: 
      (1) store_item will store nbytes as length, which is wrong.
      (2) The CAS may be incorrect.
+     Status as of Feb. 2013: 
+        Memcapable INCR/DECR/APPEND/PREPEND tests fail when
+        local caching is enabled.
     */
     ndb_pipeline * & pipeline = wqitem->pipeline;
     struct default_engine * se;
@@ -1127,8 +1130,9 @@ void build_hash_item(workitem *wqitem, O
     /* store it in the local cache? */
     // fixme: probably nbytes is wrong
     if(wqitem->prefix_info.do_mc_read) {
+      uint64_t *cas = hash_item_get_cas_ptr(item);
       ENGINE_ERROR_CODE status;
-      status = store_item(se, item, wqitem->cas, OPERATION_SET, wqitem->cookie);
+      status = store_item(se, item, cas, OPERATION_SET, wqitem->cookie);
       if(status != ENGINE_SUCCESS)
         wqitem->status = & status_block_memcache_error;
     }

=== modified file 'storage/ndb/memcache/src/schedulers/S_sched.cc'
--- a/storage/ndb/memcache/src/schedulers/S_sched.cc	2012-04-06 01:00:19 +0000
+++ b/storage/ndb/memcache/src/schedulers/S_sched.cc	2013-02-08 04:21:39 +0000
@@ -314,7 +314,6 @@ ENGINE_ERROR_CODE S::SchedulerWorker::sc
   /* READ LOCK RELEASED */
   
   item->base.nsuffix = item->base.nkey - pfx->prefix_len;
-  if(item->base.nsuffix == 0) return ENGINE_EINVAL; // key too short
  
   if(wc == 0) return ENGINE_FAILED;
     
@@ -376,6 +375,10 @@ ENGINE_ERROR_CODE S::SchedulerWorker::sc
         DEBUG_PRINT("op_status is op_not_supported");
         response_code = ENGINE_ENOTSUP;
         break;
+      case op_bad_key:
+        DEBUG_PRINT("op_status is op_bad_key");
+        response_code = ENGINE_EINVAL;
+        break;
       case op_overflow:
         DEBUG_PRINT("op_status is op_overflow");
         response_code = ENGINE_E2BIG;

=== modified file 'storage/ndb/memcache/src/schedulers/Stockholm.cc'
--- a/storage/ndb/memcache/src/schedulers/Stockholm.cc	2012-04-03 00:34:18 +0000
+++ b/storage/ndb/memcache/src/schedulers/Stockholm.cc	2013-02-08 04:38:53 +0000
@@ -205,13 +205,17 @@ ENGINE_ERROR_CODE Scheduler_stockholm::s
     case op_not_supported:
       response_code = ENGINE_ENOTSUP;
       break;
+   case op_overflow:
+      response_code = ENGINE_E2BIG;  // ENGINE_FAILED ?
+      break;
+    case op_bad_key:
+      response_code = ENGINE_EINVAL;
+      break;
     case op_failed:
+    default:
       response_code = ENGINE_FAILED;
       break;
-    case op_overflow:
-      response_code = ENGINE_E2BIG;  // ENGINE_FAILED ?
-      break;
-  }
+   }
 
   return response_code;
 }

=== modified file 'storage/ndb/memcache/src/workitem.c'
--- a/storage/ndb/memcache/src/workitem.c	2012-03-07 01:22:53 +0000
+++ b/storage/ndb/memcache/src/workitem.c	2013-02-08 15:54:41 +0000
@@ -197,6 +197,6 @@ void workitem_free(workitem *item)
 size_t workitem_get_key_buf_size(int nkey) {
   size_t bufsz;
   bufsz = nkey + 3;       // at least key + 2 length bytes + null terminator
-  return (bufsz < 9) ? 9 : bufsz;  // A packed DECIMAL could ned 9 bytes
+  return (bufsz < 9) ? 9 : bufsz;  // A packed DECIMAL could need 9 bytes
 }
 

No bundle (reason: useless for push emails).
Thread
bzr push into mysql-5.5-cluster-7.2 branch (john.duncan:4167 to 4168) — John David Duncan, 3 Apr