List: Commits « Previous Message | Next Message »
From: jonas oreland  Date: February 9, 2012, 10:23am
Subject:bzr push into mysql-5.5-cluster-7.2 branch (jonas.oreland:3807 to 3808)
View as plain text  
 3808 jonas oreland	2012-02-09 [merge]
      ndb - merge 71 to 72

    added:
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordDeleteOperationImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordInsertOperationImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordKeyOperationImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordResultDataImpl.java
    modified:
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionFactoryImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryDomainTypeImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/store/ClusterConnection.java
      storage/ndb/clusterj/clusterj-openjpa/src/main/java/com/mysql/clusterj/openjpa/NdbOpenJPAStoreManager.java
      storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/BinaryPKTest.java
      storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/StressTest.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/ClusterConnectionImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/ClusterTransactionImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/DbImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/DictionaryImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordOperationImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/Utility.java
      storage/ndb/clusterj/clusterj-tie/src/test/java/testsuite/clusterj/tie/StressTest.java
 3807 jonas oreland	2012-02-07 [merge]
      ndb - merge 71 to 72

    modified:
      storage/ndb/cmake/os/WindowsCache.cmake
      storage/ndb/include/ndb_config.h.in
      storage/ndb/include/util/Bitmask.hpp
      storage/ndb/ndb_configure.cmake
      storage/ndb/src/common/util/Bitmask.cpp
      storage/ndb/src/kernel/vm/DynArr256.cpp
      storage/ndb/src/kernel/vm/DynArr256.hpp
=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionFactoryImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionFactoryImpl.java	2011-10-27 23:43:25 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionFactoryImpl.java	2012-02-08 17:27:45 +0000
@@ -1,5 +1,5 @@
 /*
-   Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
+   Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -29,6 +29,7 @@ import com.mysql.clusterj.SessionFactory
 
 import com.mysql.clusterj.core.spi.DomainTypeHandler;
 import com.mysql.clusterj.core.spi.DomainTypeHandlerFactory;
+
 import com.mysql.clusterj.core.metadata.DomainTypeHandlerFactoryImpl;
 
 import com.mysql.clusterj.core.store.Db;
@@ -497,9 +498,14 @@ public class SessionFactoryImpl implemen
                 // remove the ndb dictionary cached table definition
                 tableName = domainTypeHandler.getTableName();
                 if (tableName != null) {
+                    if (logger.isDebugEnabled())logger.debug("Removing dictionary entry for table " + tableName
+                            + " for class " + cls.getName());
                     dictionary.removeCachedTable(tableName);
                 }
             }
+            for (ClusterConnection clusterConnection: pooledConnections) {
+                clusterConnection.unloadSchema(tableName);
+            }
             return tableName;
         }
     }

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionImpl.java	2012-01-23 00:44:39 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/SessionImpl.java	2012-02-08 17:27:45 +0000
@@ -300,10 +300,12 @@ public class SessionImpl implements Sess
         Table storeTable = domainTypeHandler.getStoreTable();
         // perform a primary key operation
         final Operation op = clusterTransaction.getSelectOperation(storeTable);
+        op.beginDefinition();
         // set the keys into the operation
         domainTypeHandler.operationSetKeys(instanceHandler, op);
         // set the expected columns into the operation
         domainTypeHandler.operationGetValues(op);
+        op.endDefinition();
         final ResultData rs = op.resultData(false);
         final SessionImpl cacheManager = this;
         // defer execution of the key operation until the next find, flush, or query
@@ -465,7 +467,9 @@ public class SessionImpl implements Sess
         Operation op = null;
         try {
             op = clusterTransaction.getDeleteOperation(storeTable);
+            op.beginDefinition();
             domainTypeHandler.operationSetKeys(valueHandler, op);
+            op.endDefinition();
         } catch (ClusterJException ex) {
             failAutoTransaction();
             throw new ClusterJException(
@@ -576,10 +580,12 @@ public class SessionImpl implements Sess
         Table storeTable = domainTypeHandler.getStoreTable();
         // perform a single select by key operation
         Operation op = clusterTransaction.getSelectOperation(storeTable);
+        op.beginDefinition();
         // set the keys into the operation
         domainTypeHandler.operationSetKeys(keyHandler, op);
         // set the expected columns into the operation
         domainTypeHandler.operationGetValues(op);
+        op.endDefinition();
         // execute the select and get results
         ResultData rs = op.resultData();
         return rs;
@@ -1405,6 +1411,9 @@ public class SessionImpl implements Sess
         }
     }
 
+    /** Unload the schema associated with the domain class. This allows schema changes to work.
+     * @param cls the class for which to unload the schema
+     */
     public String unloadSchema(Class<?> cls) {
         return factory.unloadSchema(cls, dictionary);
     }

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryDomainTypeImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryDomainTypeImpl.java	2011-11-23 10:22:31 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryDomainTypeImpl.java	2012-02-09 10:22:48 +0000
@@ -1,5 +1,5 @@
 /*
-   Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
+   Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -208,10 +208,12 @@ public class QueryDomainTypeImpl<T> impl
             case PRIMARY_KEY: {
                 // perform a select operation
                 Operation op = session.getSelectOperation(domainTypeHandler.getStoreTable());
+                op.beginDefinition();
                 // set key values into the operation
                 index.operationSetKeys(context, op);
                 // set the expected columns into the operation
                 domainTypeHandler.operationGetValues(op);
+                op.endDefinition();
                 // execute the select and get results
                 result = op.resultData();
                 break;
@@ -310,8 +312,10 @@ public class QueryDomainTypeImpl<T> impl
                     // perform a delete by primary key operation
                     if (logger.isDetailEnabled()) logger.detail("Using delete by primary key.");
                     Operation op = session.getDeleteOperation(domainTypeHandler.getStoreTable());
+                    op.beginDefinition();
                     // set key values into the operation
                     index.operationSetKeys(context, op);
+                    op.endDefinition();
                     // execute the delete operation
                     session.executeNoCommit(false, true);
                     errorCode = op.errorCode();

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/store/ClusterConnection.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/store/ClusterConnection.java	2011-03-08 00:44:56 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/store/ClusterConnection.java	2012-02-08 17:27:45 +0000
@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
+ *  Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -34,4 +34,6 @@ public interface ClusterConnection {
 
     public void close(Db db);
 
+    public void unloadSchema(String tableName);
+
 }

=== modified file 'storage/ndb/clusterj/clusterj-openjpa/src/main/java/com/mysql/clusterj/openjpa/NdbOpenJPAStoreManager.java'
--- a/storage/ndb/clusterj/clusterj-openjpa/src/main/java/com/mysql/clusterj/openjpa/NdbOpenJPAStoreManager.java	2011-08-03 01:02:19 +0000
+++ b/storage/ndb/clusterj/clusterj-openjpa/src/main/java/com/mysql/clusterj/openjpa/NdbOpenJPAStoreManager.java	2012-02-08 17:27:45 +0000
@@ -1,5 +1,5 @@
 /*
-   Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+   Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -584,6 +584,7 @@ public class NdbOpenJPAStoreManager exte
         session.startAutoTransaction();
         try {
             Operation op = session.getSelectOperation(storeTable);
+            op.beginDefinition();
             int[] keyFields = domainTypeHandler.getKeyFieldNumbers();
             BitSet fieldsInResult = new BitSet();
             for (int i : keyFields) {
@@ -597,6 +598,7 @@ public class NdbOpenJPAStoreManager exte
                 fieldHandler.operationGetValue(op);
                 fieldsInResult.set(fieldHandler.getFieldNumber());
             }
+            op.endDefinition();
             ResultData resultData = op.resultData();
             NdbOpenJPAResult result = new NdbOpenJPAResult(resultData, domainTypeHandler, fieldsInResult);
             session.endAutoTransaction();

=== modified file 'storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/BinaryPKTest.java'
--- a/storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/BinaryPKTest.java	2011-02-06 21:37:05 +0000
+++ b/storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/BinaryPKTest.java	2012-02-08 17:27:45 +0000
@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ *  Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -84,7 +84,7 @@ public class BinaryPKTest extends Abstra
             if (0 == i % 4) {
                 byte[] key = getStoragePK(i);
                 BinaryPK instance = session.find(BinaryPK.class, key);
-                verifyResult("update verify", instance, i, true);
+                verifyResult("update verify ", instance, i, true);
             }
         }
     }
@@ -145,13 +145,13 @@ public class BinaryPKTest extends Abstra
     }
 
     protected byte[] getStoragePK(int index) {
-        return new byte[] {0, (byte)(index/256), (byte)(index%256)};
+        return new byte[] {0, (byte)((index/256) + 65), (byte)((index%256) + 65)};
     }
 
     protected byte[] getResultPK(int index) {
         byte[] result = new byte[255];
-        result[1] = (byte)(index/256);
-        result[2] = (byte)(index%256);
+        result[1] = (byte)((index/256) + 65);
+        result[2] = (byte)((index%256) + 65);
         return result;
     }
 
@@ -160,28 +160,31 @@ public class BinaryPKTest extends Abstra
     }
 
     protected void verifyStorage(String where, BinaryPK instance, int index, boolean updated) {
-        errorIfNotEqual(where + "id failed", toString(getStoragePK(index)), toString(instance.getId()));
-        errorIfNotEqual(where + "number failed", index, instance.getNumber());
+        errorIfNotEqual(where + "mismatch on id", toString(getStoragePK(index)), toString(instance.getId()));
+        errorIfNotEqual(where + "mismatch on number", index, instance.getNumber());
         if (updated) {
-            errorIfNotEqual(where + "Value failed", getValue(NUMBER_OF_INSTANCES - index), instance.getName());
+            errorIfNotEqual(where + "mismatch on name", getValue(NUMBER_OF_INSTANCES - index), instance.getName());
         } else {
-            errorIfNotEqual(where + "Value failed", getValue(index), instance.getName());
+            errorIfNotEqual(where + "mismatch on name", getValue(index), instance.getName());
 
         }
     }
 
     protected void verifyResult(String where, BinaryPK instance, int index, boolean updated) {
-        errorIfNotEqual(where + "id failed", toString(getResultPK(index)), toString(instance.getId()));
-        errorIfNotEqual("number failed", index, instance.getNumber());
+        errorIfNotEqual(where + "mismatch on id", toString(getResultPK(index)), toString(instance.getId()));
+        errorIfNotEqual("mismatch on number", index, instance.getNumber());
         if (updated) {
-            errorIfNotEqual(where + "Value failed", getValue(NUMBER_OF_INSTANCES - index), instance.getName());
+            errorIfNotEqual(where + "mismatch on name", getValue(NUMBER_OF_INSTANCES - index), instance.getName());
         } else {
-            errorIfNotEqual(where + "Value failed", getValue(index), instance.getName());
+            errorIfNotEqual(where + "mismatch on name", getValue(index), instance.getName());
 
         }
     }
 
     private String toString(byte[] id) {
+        if (id == null) {
+            return "null";
+        }
         StringBuilder builder = new StringBuilder();
         for (int i = 0; i < id.length; ++i) {
             builder.append(String.valueOf(id[i]));

=== modified file 'storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/StressTest.java'
--- a/storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/StressTest.java	2012-01-21 02:22:20 +0000
+++ b/storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/StressTest.java	2012-02-08 17:27:45 +0000
@@ -17,6 +17,8 @@
 
 package testsuite.clusterj;
 
+import java.nio.ByteBuffer;
+
 import org.junit.Ignore;
 
 import com.mysql.clusterj.ClusterJFatalUserException;
@@ -33,14 +35,48 @@ public class StressTest extends Abstract
 
     private static final int NUMBER_TO_INSERT = 4000;
 
-    private static final int ITERATIONS = 5;
+    private static final int ITERATIONS = 7;
+
+    private static final int ITERATIONS_TO_DROP = 3;
 
-    private static final String tableName = "stress";
+    private static String tableName;
+
+    private static final String STRESS_TEST_TABLE_PROPERTY_NAME = "com.mysql.clusterj.StressTestTable";
+
+    static {
+        String env = System.getenv(STRESS_TEST_TABLE_PROPERTY_NAME);
+        String def = (env == null)?"stress":env;
+        tableName = System.getProperty(STRESS_TEST_TABLE_PROPERTY_NAME, def);
+    }
 
     private ColumnMetadata[] columnMetadatas;
 
     private Timer timer = new Timer();
 
+    private static int BYTES_LENGTH = 12000;
+
+    private static ByteBuffer BYTES = ByteBuffer.allocate(BYTES_LENGTH);
+
+    static {
+        for (int i = 0; i < BYTES_LENGTH; ++i) {
+            // only printable bytes from ABC..^_`
+            BYTES.put((byte)((i % 32) + 65));
+        }
+    }
+
+    private static int STRING_LENGTH = 12000;
+
+    private static String STRING;
+
+    static {
+        StringBuilder builder = new StringBuilder();
+        for (int i = 0; i < STRING_LENGTH; ++i) {
+            // only printable bytes from ABC..^_`
+            builder.append((byte)((i % 32) + 65));
+        }
+        STRING = builder.toString();
+    }
+
     @Override
     java.lang.Class<? extends IdBase> getModelClass() {
         return Stress.class;
@@ -55,7 +91,25 @@ public class StressTest extends Abstract
         columnMetadatas = session.newInstance(Stress.class).columnMetadata();
     }
 
-    public void testInsAattr_indy() {
+    public void testIndy() {
+        insAattr_indy();
+        getA_indy();
+        delA_indy();
+    }
+
+    public void testEach() {
+        insAattr_each();
+        getA_each();
+        delA_each();
+    }
+
+    public void testBulk() {
+        insAattr_bulk();
+        getA_bulk();
+        delA_bulk();
+    }
+
+    public void insAattr_indy() {
         long total = 0;
         for (int i = 0; i < ITERATIONS; ++i) {
             // garbage collect what we can before each test
@@ -67,13 +121,13 @@ public class StressTest extends Abstract
             }
             // drop the first iteration
             timer.stop();
-            if (i > 0) total += timer.time();
-            System.out.println("testInsAattr_indy: " + timer.time());
+            if (i >= ITERATIONS_TO_DROP) total += timer.time();
+            System.out.println("insAattr_indy: " + timer.time());
         }
-        System.out.println("Average: " + total/(ITERATIONS - 1) + "\n");
+        System.out.println("Excluding " + ITERATIONS_TO_DROP + " Average: " + total/(ITERATIONS - ITERATIONS_TO_DROP) + "\n");
     }
 
-    public void testInsAattr_each() {
+    public void insAattr_each() {
         long total = 0;
         for (int i = 0; i < ITERATIONS; ++i) {
             // garbage collect what we can before each test
@@ -88,13 +142,13 @@ public class StressTest extends Abstract
             session.currentTransaction().commit();
             // drop the first iteration
             timer.stop();
-            if (i > 0) total += timer.time();
-            System.out.println("testInsAattr_each: " + timer.time());
+            if (i >= ITERATIONS_TO_DROP) total += timer.time();
+            System.out.println("insAattr_each: " + timer.time());
         }
-        System.out.println("Average: " + total/(ITERATIONS - 1) + "\n");
+        System.out.println("Excluding " + ITERATIONS_TO_DROP + " Average: " + total/(ITERATIONS - ITERATIONS_TO_DROP) + "\n");
     }
 
-    public void testInsAattr_bulk() {
+    public void insAattr_bulk() {
         long total = 0;
         for (int i = 0; i < ITERATIONS; ++i) {
             // garbage collect what we can before each test
@@ -108,10 +162,122 @@ public class StressTest extends Abstract
             session.currentTransaction().commit();
             // drop the first iteration
             timer.stop();
-            if (i > 0) total += timer.time();
-            System.out.println("testInsAattr_bulk: " + timer.time());
+            if (i >= ITERATIONS_TO_DROP) total += timer.time();
+            System.out.println("insAattr_bulk: " + timer.time());
+        }
+        System.out.println("Excluding " + ITERATIONS_TO_DROP + " Average: " + total/(ITERATIONS - ITERATIONS_TO_DROP) + "\n");
+    }
+
+    public void getA_indy() {
+        long total = 0;
+        for (int i = 0; i < ITERATIONS; ++i) {
+            // garbage collect what we can before each test
+            gc();
+            timer.start();
+            for (int key = 0; key < NUMBER_TO_INSERT; ++key) {
+                session.find(Stress.class, key);
+            }
+            // drop the first iteration
+            timer.stop();
+            if (i >= ITERATIONS_TO_DROP) total += timer.time();
+            System.out.println("getA_indy: " + timer.time());
+        }
+        System.out.println("Excluding " + ITERATIONS_TO_DROP + " Average: " + total/(ITERATIONS - ITERATIONS_TO_DROP) + "\n");
+    }
+
+    public void getA_each() {
+        long total = 0;
+        for (int i = 0; i < ITERATIONS; ++i) {
+            // garbage collect what we can before each test
+            gc();
+            timer.start();
+            session.currentTransaction().begin();
+            for (int key = 0; key < NUMBER_TO_INSERT; ++key) {
+                session.find(Stress.class, key);
+            }
+            session.currentTransaction().commit();
+            // drop the first iteration
+            timer.stop();
+            if (i >= ITERATIONS_TO_DROP) total += timer.time();
+            System.out.println("getA_each: " + timer.time());
+        }
+        System.out.println("Excluding " + ITERATIONS_TO_DROP + " Average: " + total/(ITERATIONS - ITERATIONS_TO_DROP) + "\n");
+    }
+
+    public void getA_bulk() {
+        long total = 0;
+        for (int i = 0; i < ITERATIONS; ++i) {
+            // garbage collect what we can before each test
+            gc();
+            timer.start();
+            session.currentTransaction().begin();
+            for (int key = 0; key < NUMBER_TO_INSERT; ++key) {
+                Stress instance = createObject(key);
+                session.load(instance);
+            }
+            session.currentTransaction().commit();
+            // drop the first iteration
+            timer.stop();
+            if (i >= ITERATIONS_TO_DROP) total += timer.time();
+            System.out.println("getA_bulk: " + timer.time());
+        }
+        System.out.println("Excluding " + ITERATIONS_TO_DROP + " Average: " + total/(ITERATIONS - ITERATIONS_TO_DROP) + "\n");
+    }
+
+    public void delA_indy() {
+        long total = 0;
+        for (int i = 0; i < ITERATIONS; ++i) {
+            // garbage collect what we can before each test
+            gc();
+            timer.start();
+            for (int key = 0; key < NUMBER_TO_INSERT; ++key) {
+                session.deletePersistent(Stress.class, key);
+            }
+            // drop the first iteration
+            timer.stop();
+            if (i >= ITERATIONS_TO_DROP) total += timer.time();
+            System.out.println("delA_indy: " + timer.time());
+        }
+        System.out.println("Excluding " + ITERATIONS_TO_DROP + " Average: " + total/(ITERATIONS - ITERATIONS_TO_DROP) + "\n");
+    }
+
+    public void delA_each() {
+        long total = 0;
+        for (int i = 0; i < ITERATIONS; ++i) {
+            // garbage collect what we can before each test
+            gc();
+            timer.start();
+            session.currentTransaction().begin();
+            for (int key = 0; key < NUMBER_TO_INSERT; ++key) {
+                session.deletePersistent(Stress.class, key);
+                session.flush();
+            }
+            session.currentTransaction().commit();
+            // drop the first iteration
+            timer.stop();
+            if (i >= ITERATIONS_TO_DROP) total += timer.time();
+            System.out.println("delA_each: " + timer.time());
+        }
+        System.out.println("Excluding " + ITERATIONS_TO_DROP + " Average: " + total/(ITERATIONS - ITERATIONS_TO_DROP) + "\n");
+    }
+
+    public void delA_bulk() {
+        long total = 0;
+        for (int i = 0; i < ITERATIONS; ++i) {
+            // garbage collect what we can before each test
+            gc();
+            timer.start();
+            session.currentTransaction().begin();
+            for (int key = 0; key < NUMBER_TO_INSERT; ++key) {
+                session.deletePersistent(Stress.class, key);
+            }
+            session.currentTransaction().commit();
+            // drop the first iteration
+            timer.stop();
+            if (i >= ITERATIONS_TO_DROP) total += timer.time();
+            System.out.println("delA_bulk: " + timer.time());
         }
-        System.out.println("Average: " + total/(ITERATIONS - 1) + "\n");
+        System.out.println("Excluding " + ITERATIONS_TO_DROP + " Average: " + total/(ITERATIONS - ITERATIONS_TO_DROP) + "\n");
     }
 
     protected Stress createObject(int key) {
@@ -125,14 +291,39 @@ public class StressTest extends Abstract
                 continue;
             }
             Class<?> cls = columnMetadata.javaType();
+            int length = columnMetadata.maximumLength();
             if (int.class == cls) {
-                value = key;
+                value = key + columnNumber;
             } else if (long.class == cls) {
-                value = (long)key;
+                value = (long)(key + columnNumber);
             } else if (float.class == cls) {
-                value = (float)key;
+                value = (float)(key + columnNumber);
             } else if (double.class == cls) {
-                value = (double)key;
+                value = (double)(key + columnNumber);
+            } else if (short.class == cls) {
+                value = (short)(key + columnNumber);
+            } else if (byte.class == cls) {
+                value = (byte)(key + columnNumber);
+            } else if (Integer.class == cls) {
+                value = (int)(key + columnNumber);
+            } else if (Long.class == cls) {
+                value = (long)(key + columnNumber);
+            } else if (Float.class == cls) {
+                value = (float)(key + columnNumber);
+            } else if (Double.class == cls) {
+                value = (double)(key + columnNumber);
+            } else if (Short.class == cls) {
+                value = (short)(key + columnNumber);
+            } else if (Byte.class == cls) {
+                value = (byte)(key + columnNumber);
+            } else if (String.class == cls) {
+                // take 'n' characters from the static String
+                value = STRING.substring(key + columnNumber, key + columnNumber + length);
+            } else if (byte[].class == cls) {
+                // take 'n' bytes from the static byte array
+                value = new byte[length];
+                BYTES.position((key + columnNumber));
+                BYTES.get((byte[])value);
             } else {
                 throw new ClusterJFatalUserException("Unsupported column type " + cls.getName()
                         + " for column " + columnMetadata.name());
@@ -146,6 +337,7 @@ public class StressTest extends Abstract
         public Stress() {}
 
         public String table() {
+            System.out.println("Stress table being used: " + tableName);
             return tableName;
         }
 

=== modified file 'storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/ClusterConnectionImpl.java'
--- a/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/ClusterConnectionImpl.java	2012-01-23 20:54:27 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/ClusterConnectionImpl.java	2012-02-08 17:27:45 +0000
@@ -204,6 +204,7 @@ public class ClusterConnectionImpl
         NdbRecordImpl result = ndbRecordImplMap.get(tableName);
         if (result != null) {
             // case 1
+            if (logger.isDebugEnabled())logger.debug("NdbRecordImpl found for " + tableName);
             return result;
         } else {
             NdbRecordImpl newNdbRecordImpl = new NdbRecordImpl(storeTable, dictionaryForNdbRecord);
@@ -221,4 +222,17 @@ public class ClusterConnectionImpl
         }
     }
 
+    /** Remove the cached NdbRecord associated with this table. This allows schema change to work.
+     * @param tableName the name of the table
+     */
+    public void unloadSchema(String tableName) {
+        if (logger.isDebugEnabled())logger.debug("Removing cached NdbRecord for " + tableName);
+        NdbRecordImpl ndbRecordImpl = ndbRecordImplMap.remove(tableName);
+        if (ndbRecordImpl != null) {
+            ndbRecordImpl.releaseNdbRecord();
+        }
+        if (logger.isDebugEnabled())logger.debug("Removing dictionary entry for cached table " + tableName);
+        dictionaryForNdbRecord.removeCachedTable(tableName);
+    }
+
 }

=== modified file 'storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/ClusterTransactionImpl.java'
--- a/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/ClusterTransactionImpl.java	2012-01-23 00:44:39 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/ClusterTransactionImpl.java	2012-02-08 17:27:45 +0000
@@ -209,13 +209,16 @@ class ClusterTransactionImpl implements
 
     public Operation getDeleteOperation(Table storeTable) {
         enlist();
+        if (logger.isTraceEnabled()) logger.trace("Table: " + storeTable.getName());
+        if (USE_NDBRECORD) {
+            return new NdbRecordDeleteOperationImpl(this, storeTable);
+        }
         TableConst ndbTable = ndbDictionary.getTable(storeTable.getName());
         handleError(ndbTable, ndbDictionary);
         NdbOperation ndbOperation = ndbTransaction.getNdbOperation(ndbTable);
         handleError(ndbOperation, ndbTransaction);
         int returnCode = ndbOperation.deleteTuple();
         handleError(returnCode, ndbTransaction);
-        if (logger.isTraceEnabled()) logger.trace("Table: " + storeTable.getName());;
         return new OperationImpl(ndbOperation, this);
     }
 
@@ -223,7 +226,7 @@ class ClusterTransactionImpl implements
         enlist();
         if (logger.isTraceEnabled()) logger.trace("Table: " + storeTable.getName());
         if (USE_NDBRECORD) {
-            return new NdbRecordOperationImpl(this, storeTable);
+            return new NdbRecordInsertOperationImpl(this, storeTable);
         }
         TableConst ndbTable = ndbDictionary.getTable(storeTable.getName());
         handleError(ndbTable, ndbDictionary);
@@ -284,6 +287,9 @@ class ClusterTransactionImpl implements
 
     public Operation getSelectOperation(Table storeTable) {
         enlist();
+        if (USE_NDBRECORD) {
+            return new NdbRecordKeyOperationImpl(this, storeTable);
+        }
         TableConst ndbTable = ndbDictionary.getTable(storeTable.getName());
         handleError(ndbTable, ndbDictionary);
         NdbOperation ndbOperation = ndbTransaction.getNdbOperation(ndbTable);
@@ -403,6 +409,40 @@ class ClusterTransactionImpl implements
         handleError(operation, ndbTransaction);
         return operation;
     }
+
+    /** Create an NdbOperation for delete using NdbRecord.
+     * 
+     * @param ndbRecord the NdbRecord
+     * @param buffer the buffer with data for the operation
+     * @param mask the mask of column values already set in the buffer
+     * @param options the OperationOptions for this operation
+     * @return the delete operation
+     */
+    public NdbOperationConst deleteTuple(NdbRecordConst ndbRecord,
+            ByteBuffer buffer, byte[] mask, OperationOptionsConst options) {
+        NdbOperationConst operation = ndbTransaction.deleteTuple(ndbRecord, buffer, ndbRecord, null, mask, options, 0);
+        handleError(operation, ndbTransaction);
+        return operation;
+    }
+
+    /** Create an NdbOperation for key read using NdbRecord. The 'find' lock mode is used.
+     * 
+     * @param ndbRecordKeys the NdbRecord for the key
+     * @param keyBuffer the buffer with the key for the operation
+     * @param ndbRecordValues the NdbRecord for the value
+     * @param valueBuffer the buffer with the value returned by the operation
+     * @param mask the mask of column values to be read
+     * @param options the OperationOptions for this operation
+     * @return the ndb operation for key read
+     */
+    public NdbOperationConst readTuple(NdbRecordConst ndbRecordKeys, ByteBuffer keyBuffer,
+            NdbRecordConst ndbRecordValues, ByteBuffer valueBuffer,
+            byte[] mask, OperationOptionsConst options) {
+        NdbOperationConst operation = ndbTransaction.readTuple(ndbRecordKeys, keyBuffer, 
+                ndbRecordValues, valueBuffer, findLockMode, mask, options, 0);
+        handleError(operation, ndbTransaction);
+        return operation;
+    }
 
     public void postExecuteCallback(Runnable callback) {
         postExecuteCallbacks.add(callback);

=== modified file 'storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/DbImpl.java'
--- a/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/DbImpl.java	2012-01-23 00:44:39 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/DbImpl.java	2012-02-08 17:27:45 +0000
@@ -90,7 +90,7 @@ class DbImpl implements com.mysql.cluste
         handleError(returnCode, ndb);
         ndbDictionary = ndb.getDictionary();
         handleError(ndbDictionary, ndb);
-        this.dictionary = new DictionaryImpl(ndbDictionary);
+        this.dictionary = new DictionaryImpl(ndbDictionary, clusterConnection);
     }
 
     public void close() {

=== modified file 'storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/DictionaryImpl.java'
--- a/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/DictionaryImpl.java	2012-01-23 00:44:39 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/DictionaryImpl.java	2012-02-08 17:27:45 +0000
@@ -46,8 +46,11 @@ class DictionaryImpl implements com.mysq
 
     private Dictionary ndbDictionary;
 
-    public DictionaryImpl(Dictionary ndbDictionary) {
+    private ClusterConnectionImpl clusterConnection;
+
+    public DictionaryImpl(Dictionary ndbDictionary, ClusterConnectionImpl clusterConnection) {
         this.ndbDictionary = ndbDictionary;
+        this.clusterConnection = clusterConnection;
     }
 
     public Table getTable(String tableName) {
@@ -123,8 +126,14 @@ class DictionaryImpl implements com.mysq
         }
     }
 
+    /** Remove cached table from this ndb dictionary. This allows schema change to work.
+     * @param tableName the name of the table
+     */
     public void removeCachedTable(String tableName) {
+        // remove the cached table from this dictionary
         ndbDictionary.removeCachedTable(tableName);
+        // also remove the cached NdbRecord associated with this table
+        clusterConnection.unloadSchema(tableName);
     }
 
     public Dictionary getNdbDictionary() {

=== added file 'storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordDeleteOperationImpl.java'
--- a/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordDeleteOperationImpl.java	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordDeleteOperationImpl.java	2012-02-08 17:27:45 +0000
@@ -0,0 +1,54 @@
+/*
+ *  Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+ */
+
+package com.mysql.clusterj.tie;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+import com.mysql.clusterj.core.store.Table;
+
+public class NdbRecordDeleteOperationImpl extends NdbRecordOperationImpl {
+
+    /** The number of columns for this operation */
+    protected int numberOfColumns;
+
+    public NdbRecordDeleteOperationImpl(
+            ClusterTransactionImpl clusterTransaction, Table storeTable) {
+        super(clusterTransaction);
+        this.ndbRecordKeys = clusterTransaction.getCachedNdbRecordImpl(storeTable);
+        this.keyBufferSize = ndbRecordKeys.getBufferSize();
+        this.numberOfColumns = ndbRecordKeys.getNumberOfColumns();
+    }
+
+    public void beginDefinition() {
+        // allocate a buffer for the operation data
+        keyBuffer = ByteBuffer.allocateDirect(keyBufferSize);
+        // use platform's native byte ordering
+        keyBuffer.order(ByteOrder.nativeOrder());
+        mask = new byte[1 + (numberOfColumns/8)];
+    }
+
+    public void endDefinition() {
+        // position the buffer at the beginning for ndbjtie
+        keyBuffer.position(0);
+        keyBuffer.limit(keyBufferSize);
+        // create the delete operation
+        ndbOperation = clusterTransaction.deleteTuple(ndbRecordKeys.getNdbRecord(), keyBuffer, mask, null);
+    }
+
+}

=== modified file 'storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordImpl.java'
--- a/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordImpl.java	2012-01-23 00:44:39 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordImpl.java	2012-02-08 17:27:45 +0000
@@ -45,11 +45,11 @@ import com.mysql.ndbjtie.ndbapi.NdbDicti
 import com.mysql.ndbjtie.ndbapi.NdbDictionary.TableConst;
 
 /**
- * Wrapper around an NdbRecord. The default implementation can be used for insert,
+ * Wrapper around an NdbRecord. The default implementation can be used for create, read, update, or delete
  * using an NdbRecord that defines every column in the table. After construction, the instance is
  * read-only and can be shared among all threads that use the same cluster connection; and the size of the
  * buffer required for operations is available. The NdbRecord instance is released when the cluster
- * connection is closed. Column values can be set using a provided
+ * connection is closed or when schema change invalidates it. Column values can be set using a provided
  * buffer and buffer manager.
  */
 public class NdbRecordImpl {
@@ -65,11 +65,11 @@ public class NdbRecordImpl {
     /** The size of the NdbRecord struct */
     protected final static int SIZEOF_RECORD_SPECIFICATION = ClusterConnectionServiceImpl.SIZEOF_RECORD_SPECIFICATION;
 
-    /** The NdbRecord for this operation */
+    /** The NdbRecord for this operation, created at construction */
     private NdbRecord ndbRecord = null;
 
     /** The store columns for this operation */
-    protected List<Column> storeColumns = new ArrayList<Column>();
+    protected Column[] storeColumns = null;
 
     /** The RecordSpecificationArray used to define the columns in the NdbRecord */
     private RecordSpecificationArray recordSpecificationArray;
@@ -77,27 +77,30 @@ public class NdbRecordImpl {
     /** The NdbTable */
     TableConst tableConst;
 
-    /** The size of the receive buffer for this operation (may be zero for non-read operations) */
+    /** The size of the receive buffer for this operation */
     protected int bufferSize;
 
-    /** The maximum column id for this operation (may be zero for non-read operations) */
+    /** The maximum column id for this operation */
     protected int maximumColumnId;
 
-    /** The offsets into the buffer for each column (may be null for non-read operations) */
+    /** The offsets into the buffer for each column */
     protected int[] offsets;
 
+    /** The lengths of the column data */
+    protected int[] lengths;
+
     /** Values for setting column mask and null bit mask */
     protected final static byte[] BIT_IN_BYTE_MASK = new byte[] {1, 2, 4, 8, 16, 32, 64, -128};
 
-    /** The position in the null indicator for the field */
-    protected int nullablePositions[] = null;
-
     /** The null indicator for the field bit in the byte */
     protected int nullbitBitInByte[] = null;
 
     /** The null indicator for the field byte offset*/
     protected int nullbitByteOffset[] = null;
 
+    /** The size of the null indicator byte array */
+    protected int nullIndicatorSize;
+
     /** The maximum length of any column in this operation */
     protected int maximumColumnLength;
 
@@ -122,13 +125,13 @@ public class NdbRecordImpl {
         this.numberOfColumns = tableConst.getNoOfColumns();
         this.recordSpecificationArray = RecordSpecificationArray.create(numberOfColumns);
         this.offsets = new int[numberOfColumns];
-        this.nullablePositions = new int[numberOfColumns];
+        this.lengths = new int[numberOfColumns];
         this.nullbitBitInByte = new int[numberOfColumns];
         this.nullbitByteOffset = new int[numberOfColumns];
+        this.storeColumns = new Column[numberOfColumns];
         this.ndbRecord = createNdbRecord(storeTable, ndbDictionary);
     }
 
-
     public int setBigInteger(ByteBuffer buffer, Column storeColumn, BigInteger value) {
         int columnId = storeColumn.getColumnId();
         int newPosition = offsets[columnId];
@@ -140,7 +143,12 @@ public class NdbRecordImpl {
 
     public int setByte(ByteBuffer buffer, Column storeColumn, byte value) {
         int columnId = storeColumn.getColumnId();
-        buffer.put(offsets[columnId], value);
+        if (storeColumn.getLength() == 4) {
+            // the byte is stored as a BIT array of four bytes
+            buffer.putInt(offsets[columnId], value);
+        } else {
+            buffer.put(offsets[columnId], (byte)value);
+        }
         return columnId;
     }
 
@@ -156,6 +164,7 @@ public class NdbRecordImpl {
         int columnId = storeColumn.getColumnId();
         int newPosition = offsets[columnId];
         buffer.position(newPosition);
+        // TODO provide the buffer to Utility.convertValue to avoid copying
         ByteBuffer decimalBuffer = Utility.convertValue(storeColumn, value);
         buffer.put(decimalBuffer);
         return columnId;
@@ -175,7 +184,8 @@ public class NdbRecordImpl {
 
     public int setInt(ByteBuffer buffer, Column storeColumn, Integer value) {
         int columnId = storeColumn.getColumnId();
-        buffer.putInt(offsets[columnId], value);
+        int storageValue = Utility.convertIntValueForStorage(storeColumn, value);
+        buffer.putInt(offsets[columnId], storageValue);
         return columnId;
     }
 
@@ -197,19 +207,275 @@ public class NdbRecordImpl {
 
     public int setShort(ByteBuffer buffer, Column storeColumn, Short value) {
         int columnId = storeColumn.getColumnId();
-        buffer.putShort(offsets[columnId], value);
+        if (storeColumn.getLength() == 4) {
+            // the short is stored as a BIT array of four bytes
+            buffer.putInt(offsets[columnId], value);
+        } else {
+            buffer.putShort(offsets[columnId], (short)value);
+        }
         return columnId;
     }
 
     public int setString(ByteBuffer buffer, BufferManager bufferManager, Column storeColumn, String value) {
         int columnId = storeColumn.getColumnId();
         buffer.position(offsets[columnId]);
+        // TODO provide the buffer to Utility.encode to avoid copying
         // for now, use the encode method to encode the value then copy it
         ByteBuffer converted = Utility.encode(value, storeColumn, bufferManager);
         buffer.put(converted);
         return columnId;
     }
 
+    public boolean getBoolean(ByteBuffer buffer, int columnId) {
+        int value = buffer.getInt(offsets[columnId]);
+        return Utility.getBoolean(storeColumns[columnId], value);
+    }
+
+    public byte getByte(ByteBuffer buffer, int columnId) {
+        Column storeColumn = storeColumns[columnId];
+        if (storeColumn.getLength() == 4) {
+            // the byte was stored in a BIT column as four bytes
+            return (byte)buffer.getInt(offsets[columnId]);
+        } else {
+            // the byte was stored as a byte
+            return buffer.get(offsets[columnId]);
+        }
+    }
+
+    public byte[] getBytes(ByteBuffer byteBuffer, int columnId) {
+        return getBytes(byteBuffer, storeColumns[columnId]);
+    }
+
+    public byte[] getBytes(ByteBuffer byteBuffer, Column storeColumn) {
+        int columnId = storeColumn.getColumnId();
+        if (isNull(byteBuffer, columnId)) {
+            return null;
+        }
+        int prefixLength = storeColumn.getPrefixLength();
+        int actualLength = lengths[columnId];
+        int offset = offsets[columnId];
+        switch (prefixLength) {
+            case 0:
+                break;
+            case 1:
+                actualLength = (byteBuffer.get(offset) + 256) % 256;
+                offset += 1;
+                break;
+            case 2:
+                actualLength = (byteBuffer.get(offset) + 256) % 256;
+                int length2 = (byteBuffer.get(offset + 1) + 256) % 256;
+                actualLength += 256 * length2;
+                offset += 2;
+                break;
+            default:
+                throw new ClusterJFatalInternalException(
+                        local.message("ERR_Invalid_Prefix_Length", prefixLength));
+        }
+        byteBuffer.position(offset);
+        byte[] result = new byte[actualLength];
+        byteBuffer.get(result);
+        return result;
+     }
+
+    public double getDouble(ByteBuffer buffer, int columnId) {
+        buffer.position(offsets[columnId]);
+        double result = buffer.getDouble();
+        return result;
+    }
+
+    public float getFloat(ByteBuffer buffer, int columnId) {
+        buffer.position(offsets[columnId]);
+        float result = buffer.getFloat();
+        return result;
+    }
+
+    public int getInt(ByteBuffer buffer, int columnId) {
+        buffer.position(offsets[columnId]);
+        int value = buffer.getInt();
+        return Utility.getInt(storeColumns[columnId], value);
+    }
+
+    public long getLong(ByteBuffer buffer, int columnId) {
+        buffer.position(offsets[columnId]);
+        long value = buffer.getLong();
+        return Utility.getLong(storeColumns[columnId], value);
+    }
+
+    public short getShort(ByteBuffer buffer, int columnId) {
+        Column storeColumn = storeColumns[columnId];
+        if (storeColumn.getLength() == 4) {
+            // the short was stored in a BIT column as four bytes
+            return (short)buffer.getInt(offsets[columnId]);
+        } else {
+            // the short was stored as a short
+            return buffer.getShort(offsets[columnId]);
+        }
+    }
+
+    public String getString(ByteBuffer byteBuffer, int columnId, BufferManager bufferManager) {
+      if (isNull(byteBuffer, columnId)) {
+          return null;
+      }
+      Column storeColumn = storeColumns[columnId];
+      int prefixLength = storeColumn.getPrefixLength();
+      int actualLength;
+      int offset = offsets[columnId];
+      byteBuffer.limit(byteBuffer.capacity());
+      switch (prefixLength) {
+          case 0:
+              actualLength = lengths[columnId];
+              break;
+          case 1:
+              actualLength = (byteBuffer.get(offset) + 256) % 256;
+              offset += 1;
+              break;
+          case 2:
+              actualLength = (byteBuffer.get(offset) + 256) % 256;
+              int length2 = (byteBuffer.get(offset + 1) + 256) % 256;
+              actualLength += 256 * length2;
+              offset += 2;
+              break;
+          default:
+              throw new ClusterJFatalInternalException(
+                      local.message("ERR_Invalid_Prefix_Length", prefixLength));
+      }
+      byteBuffer.position(offset);
+      byteBuffer.limit(offset + actualLength);
+
+      String result = Utility.decode(byteBuffer, storeColumn.getCharsetNumber(), bufferManager);
+      return result;
+    }
+
+    public BigInteger getBigInteger(ByteBuffer byteBuffer, int columnId) {
+        Column storeColumn = storeColumns[columnId];
+        int index = storeColumn.getColumnId();
+        int offset = offsets[index];
+        int precision = storeColumn.getPrecision();
+        int scale = storeColumn.getScale();
+        int length = Utility.getDecimalColumnSpace(precision, scale);
+        byteBuffer.position(offset);
+        return Utility.getBigInteger(byteBuffer, length, precision, scale);
+    }
+
+    public BigInteger getBigInteger(ByteBuffer byteBuffer, Column storeColumn) {
+        int index = storeColumn.getColumnId();
+        int offset = offsets[index];
+        int precision = storeColumn.getPrecision();
+        int scale = storeColumn.getScale();
+        int length = Utility.getDecimalColumnSpace(precision, scale);
+        byteBuffer.position(offset);
+        return Utility.getBigInteger(byteBuffer, length, precision, scale);
+    }
+
+    public BigDecimal getDecimal(ByteBuffer byteBuffer, int columnId) {
+        Column storeColumn = storeColumns[columnId];
+        int index = storeColumn.getColumnId();
+        int offset = offsets[index];
+        int precision = storeColumn.getPrecision();
+        int scale = storeColumn.getScale();
+        int length = Utility.getDecimalColumnSpace(precision, scale);
+        byteBuffer.position(offset);
+        return Utility.getDecimal(byteBuffer, length, precision, scale);
+      }
+
+    public BigDecimal getDecimal(ByteBuffer byteBuffer, Column storeColumn) {
+      int index = storeColumn.getColumnId();
+      int offset = offsets[index];
+      int precision = storeColumn.getPrecision();
+      int scale = storeColumn.getScale();
+      int length = Utility.getDecimalColumnSpace(precision, scale);
+      byteBuffer.position(offset);
+      return Utility.getDecimal(byteBuffer, length, precision, scale);
+    }
+
+    public Boolean getObjectBoolean(ByteBuffer byteBuffer, int columnId) {
+        if (isNull(byteBuffer, columnId)) {
+            return null;
+        }
+        return Boolean.valueOf(getBoolean(byteBuffer, columnId));        
+    }
+
+    public Boolean getObjectBoolean(ByteBuffer byteBuffer, Column storeColumn) {
+        return getObjectBoolean(byteBuffer, storeColumn.getColumnId());
+    }
+
+    public Byte getObjectByte(ByteBuffer byteBuffer, int columnId) {
+        if (isNull(byteBuffer, columnId)) {
+            return null;
+        }
+        return getByte(byteBuffer, columnId);        
+    }
+
+    public Byte getObjectByte(ByteBuffer byteBuffer, Column storeColumn) {
+        return getObjectByte(byteBuffer, storeColumn.getColumnId());
+    }
+
+    public Float getObjectFloat(ByteBuffer byteBuffer, int columnId) {
+        if (isNull(byteBuffer, columnId)) {
+            return null;
+        }
+        return getFloat(byteBuffer, columnId);        
+    }
+
+    public Float getObjectFloat(ByteBuffer byteBuffer, Column storeColumn) {
+        return getObjectFloat(byteBuffer, storeColumn.getColumnId());
+    }
+
+    public Double getObjectDouble(ByteBuffer byteBuffer, int columnId) {
+        if (isNull(byteBuffer, columnId)) {
+            return null;
+        }
+        return getDouble(byteBuffer, columnId);        
+    }
+
+    public Double getObjectDouble(ByteBuffer byteBuffer, Column storeColumn) {
+        return getObjectDouble(byteBuffer, storeColumn.getColumnId());
+    }
+
+    public Integer getObjectInteger(ByteBuffer byteBuffer, int columnId) {
+        if (isNull(byteBuffer, columnId)) {
+            return null;
+        }
+        return getInt(byteBuffer, columnId);        
+    }
+
+    public Integer getObjectInteger(ByteBuffer byteBuffer, Column storeColumn) {
+        return getObjectInteger(byteBuffer, storeColumn.getColumnId());
+    }
+
+    public Long getObjectLong(ByteBuffer byteBuffer, int columnId) {
+        if (isNull(byteBuffer, columnId)) {
+            return null;
+        }
+        return getLong(byteBuffer, columnId);        
+    }
+
+    public Long getObjectLong(ByteBuffer byteBuffer, Column storeColumn) {
+        return getObjectLong(byteBuffer, storeColumn.getColumnId());
+    }
+
+    public Short getObjectShort(ByteBuffer byteBuffer, int columnId) {
+        if (isNull(byteBuffer, columnId)) {
+            return null;
+        }
+        return getShort(byteBuffer, columnId);        
+    }
+
+    public Short getObjectShort(ByteBuffer byteBuffer, Column storeColumn) {
+        return getObjectShort(byteBuffer, storeColumn.getColumnId());
+    }
+
+    public boolean isNull(ByteBuffer buffer, int columnId) {
+        if (!storeColumns[columnId].getNullable()) {
+            return false;
+        }
+        int index = nullbitByteOffset[columnId];
+        byte mask = BIT_IN_BYTE_MASK[nullbitBitInByte[columnId]];
+        byte nullbyte = buffer.get(index);
+        boolean result = (nullbyte & mask) != 0;
+        return result;
+    }
+
     protected static void handleError(Object object, Dictionary ndbDictionary) {
         if (object != null) {
             return;
@@ -225,9 +491,11 @@ public class NdbRecordImpl {
         List<Column> align2 = new ArrayList<Column>();
         List<Column> align1 = new ArrayList<Column>();
         List<Column> nullables = new ArrayList<Column>();
+        int i = 0;
         for (String columnName: columnNames) {
             Column storeColumn = storeTable.getColumn(columnName);
-            storeColumns.add(storeColumn);
+            lengths[i] = storeColumn.getLength();
+            storeColumns[i++] = storeColumn;
             // for each column, put into alignment bucket
             switch (storeColumn.getType()) {
                 case Bigint:
@@ -285,6 +553,7 @@ public class NdbRecordImpl {
         offset = nullables.size() + 7 / 8;
         // align the first column following the nullable column indicators to 8
         offset = (7 + offset) / 8 * 8;
+        nullIndicatorSize = offset;
         for (Column storeColumn: align8) {
             handleColumn(8, storeColumn);
         }
@@ -299,6 +568,8 @@ public class NdbRecordImpl {
         }
         bufferSize = offset;
 
+        if (logger.isDebugEnabled()) logger.debug(dump());
+
         // now create an NdbRecord
         NdbRecord result = ndbDictionary.createRecord(tableConst, recordSpecificationArray,
                 numberOfColumns, SIZEOF_RECORD_SPECIFICATION, 0);
@@ -322,26 +593,41 @@ public class NdbRecordImpl {
         recordSpecification.offset(offset);
         offsets[columnId] = offset;
         int columnSpace = storeColumn.getColumnSpace();
-        offset += (columnSpace==0)?8:columnSpace;
+        offset += ((columnSpace==0)?alignment:columnSpace);
         if (storeColumn.getNullable()) {
-            nullablePositions[columnId] = nullablePosition++;
             int nullbitByteOffsetValue = nullablePosition/8;
             int nullbitBitInByteValue = nullablePosition - nullablePosition / 8 * 8;
             nullbitBitInByte[columnId] = nullbitBitInByteValue;
             nullbitByteOffset[columnId] = nullbitByteOffsetValue;
             recordSpecification.nullbit_byte_offset(nullbitByteOffsetValue);
             recordSpecification.nullbit_bit_in_byte(nullbitBitInByteValue);
+            ++nullablePosition;
         } else {
             recordSpecification.nullbit_byte_offset(0);
             recordSpecification.nullbit_bit_in_byte(0);
         }
-        if (logger.isDetailEnabled()) logger.detail(
-                "column: " + storeColumn.getName()
-                + " columnSpace: " + columnSpace 
-                + " offset: " + offsets[columnId]
-                + " nullable position: " + nullablePositions[columnId]
-                + " nullbitByteOffset: " + nullbitByteOffset[columnId]
-                + " nullbitBitInByte: " +  nullbitBitInByte[columnId]);
+    }
+
+    private String dump() {
+        StringBuilder builder = new StringBuilder(tableConst.getName());
+        builder.append(" numberOfColumns: ");
+        builder.append(numberOfColumns);
+        builder.append('\n');
+        for (int columnId = 0; columnId < numberOfColumns; ++columnId) {
+            Column storeColumn = storeColumns[columnId];
+            builder.append(" column: ");
+            builder.append(storeColumn.getName());
+            builder.append(" offset: ");
+            builder.append(offsets[columnId]);
+            builder.append(" length: ");
+            builder.append(lengths[columnId]);
+            builder.append(" nullbitBitInByte: ");
+            builder.append(nullbitBitInByte[columnId]);
+            builder.append(" nullbitByteOffset: ");
+            builder.append(nullbitByteOffset[columnId]);
+            builder.append('\n');
+        }
+        return builder.toString();
     }
 
     TableConst getNdbTable(String tableName) {
@@ -367,9 +653,14 @@ public class NdbRecordImpl {
 
     protected void releaseNdbRecord() {
         if (ndbRecord != null) {
+            if (logger.isDebugEnabled())logger.debug("Releasing NdbRecord for " + tableConst.getName());
             ndbDictionary.releaseRecord(ndbRecord);
             ndbRecord = null;
         }
     }
 
+    public int getNullIndicatorSize() {
+        return nullIndicatorSize;
+    }
+
 }

=== added file 'storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordInsertOperationImpl.java'
--- a/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordInsertOperationImpl.java	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordInsertOperationImpl.java	2012-02-08 17:27:45 +0000
@@ -0,0 +1,63 @@
+/*
+ *  Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+ */
+
+package com.mysql.clusterj.tie;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+import com.mysql.clusterj.core.store.Table;
+
+public class NdbRecordInsertOperationImpl extends NdbRecordOperationImpl {
+
+    /** The number of columns for this operation */
+    protected int numberOfColumns;
+
+    public NdbRecordInsertOperationImpl(ClusterTransactionImpl clusterTransaction, Table storeTable) {
+        super(clusterTransaction);
+        this.ndbRecordValues = clusterTransaction.getCachedNdbRecordImpl(storeTable);
+        this.ndbRecordKeys = ndbRecordValues;
+        this.valueBufferSize = ndbRecordValues.getBufferSize();
+        this.numberOfColumns = ndbRecordValues.getNumberOfColumns();
+        this.blobs = new NdbRecordBlobImpl[this.numberOfColumns];
+    }
+
+    public void beginDefinition() {
+        // allocate a buffer for the operation data
+        valueBuffer = ByteBuffer.allocateDirect(valueBufferSize);
+        // use platform's native byte ordering
+        valueBuffer.order(ByteOrder.nativeOrder());
+        // use value buffer for key buffer also
+        keyBuffer = valueBuffer;
+        mask = new byte[1 + (numberOfColumns/8)];
+    }
+
+    public void endDefinition() {
+        // position the buffer at the beginning for ndbjtie
+        valueBuffer.position(0);
+        valueBuffer.limit(valueBufferSize);
+        // create the insert operation
+        ndbOperation = clusterTransaction.insertTuple(ndbRecordValues.getNdbRecord(), valueBuffer, mask, null);
+        // now set the NdbBlob into the blobs
+        for (NdbRecordBlobImpl blob: activeBlobs) {
+            if (blob != null) {
+                blob.setNdbBlob();
+            }
+        }
+    }
+
+}

=== added file 'storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordKeyOperationImpl.java'
--- a/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordKeyOperationImpl.java	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordKeyOperationImpl.java	2012-02-08 17:27:45 +0000
@@ -0,0 +1,114 @@
+/*
+   Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+ */
+
+package com.mysql.clusterj.tie;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+import com.mysql.clusterj.core.store.Column;
+import com.mysql.clusterj.core.store.ResultData;
+import com.mysql.clusterj.core.store.Table;
+
+public class NdbRecordKeyOperationImpl extends NdbRecordOperationImpl {
+
+    /** The number of columns in the table */
+    protected int numberOfColumns;
+
+    public NdbRecordKeyOperationImpl(ClusterTransactionImpl clusterTransaction, Table storeTable) {
+        super(clusterTransaction);
+        this.ndbRecordKeys = clusterTransaction.getCachedNdbRecordImpl(storeTable);
+        this.keyBufferSize = ndbRecordKeys.getBufferSize();
+        this.ndbRecordValues = clusterTransaction.getCachedNdbRecordImpl(storeTable);
+        this.valueBufferSize = ndbRecordValues.getBufferSize();
+        this.numberOfColumns = ndbRecordValues.getNumberOfColumns();
+        this.blobs = new NdbRecordBlobImpl[this.numberOfColumns];
+    }
+
+    public void beginDefinition() {
+        // allocate a buffer for the key data
+        keyBuffer = ByteBuffer.allocateDirect(keyBufferSize);
+        keyBuffer.order(ByteOrder.nativeOrder());
+        // allocate a buffer for the value result data
+        valueBuffer = ByteBuffer.allocateDirect(valueBufferSize);
+        valueBuffer.order(ByteOrder.nativeOrder());
+        mask = new byte[1 + (numberOfColumns/8)];
+    }
+
+    /** Specify the columns to be used for the operation.
+     */
+    public void getValue(Column storeColumn) {
+        int columnId = storeColumn.getColumnId();
+        columnSet(columnId);
+    }
+
+    /**
+     * Mark this blob column to be read.
+     * @param storeColumn the store column
+     */
+    @Override
+    public void getBlob(Column storeColumn) {
+        // create an NdbRecordBlobImpl for the blob
+        int columnId = storeColumn.getColumnId();
+        columnSet(columnId);
+        NdbRecordBlobImpl blob = new NdbRecordBlobImpl(this, storeColumn);
+        blobs[columnId] = blob;
+    }
+
+    public void endDefinition() {
+        // position the key buffer at the beginning for ndbjtie
+        keyBuffer.position(0);
+        keyBuffer.limit(keyBufferSize);
+        // position the value buffer at the beginning for ndbjtie
+        valueBuffer.position(0);
+        valueBuffer.limit(valueBufferSize);
+        // create the key operation
+        ndbOperation = clusterTransaction.readTuple(ndbRecordKeys.getNdbRecord(), keyBuffer,
+                ndbRecordValues.getNdbRecord(), valueBuffer, mask, null);
+        // set up a callback when this operation is executed
+        clusterTransaction.postExecuteCallback(new Runnable() {
+            public void run() {
+                for (int columnId = 0; columnId < numberOfColumns; ++columnId) {
+                    NdbRecordBlobImpl blob = blobs[columnId];
+                    if (blob != null) {
+                        blob.setNdbBlob();
+                    }
+                }
+            }
+        });
+    }
+
+    /** Construct a new ResultData using the saved column data and then execute the operation.
+     */
+    @Override
+    public ResultData resultData() {
+        return resultData(true);
+    }
+
+    /** Construct a new ResultData and if requested, execute the operation.
+     */
+    @Override
+    public ResultData resultData(boolean execute) {
+        NdbRecordResultDataImpl result =
+            new NdbRecordResultDataImpl(this, ndbRecordValues, valueBuffer, bufferManager);
+        if (execute) {
+            clusterTransaction.executeNoCommit(false, true);
+        }
+        return result;
+    }
+
+}

=== modified file 'storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordOperationImpl.java'
--- a/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordOperationImpl.java	2012-01-23 00:44:39 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordOperationImpl.java	2012-02-08 17:27:45 +0000
@@ -21,7 +21,6 @@ import java.math.BigDecimal;
 import java.math.BigInteger;
 
 import java.nio.ByteBuffer;
-import java.nio.ByteOrder;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -32,7 +31,6 @@ import com.mysql.clusterj.core.store.Blo
 import com.mysql.clusterj.core.store.Column;
 import com.mysql.clusterj.core.store.Operation;
 import com.mysql.clusterj.core.store.ResultData;
-import com.mysql.clusterj.core.store.Table;
 import com.mysql.clusterj.core.util.I18NHelper;
 import com.mysql.clusterj.core.util.Logger;
 import com.mysql.clusterj.core.util.LoggerFactoryService;
@@ -46,7 +44,7 @@ import com.mysql.ndbjtie.ndbapi.NdbDicti
 /**
  * Implementation of store operation that uses NdbRecord.
  */
-class NdbRecordOperationImpl implements Operation {
+public abstract class NdbRecordOperationImpl implements Operation {
 
     /** My message translator */
     static final I18NHelper local = I18NHelper
@@ -60,16 +58,22 @@ class NdbRecordOperationImpl implements
     protected ClusterTransactionImpl clusterTransaction;
 
     /** The NdbOperation wrapped by this object */
-    private NdbOperationConst ndbOperation = null;
+    protected NdbOperationConst ndbOperation = null;
 
-    /** The NdbRecord for this operation */
-    private NdbRecordImpl ndbRecordImpl = null;
+    /** The NdbRecord for keys */
+    protected NdbRecordImpl ndbRecordKeys = null;
 
-    /** The mask for this operation, which contains a bit set for each column to be inserted */
+    /** The NdbRecord for values */
+    protected NdbRecordImpl ndbRecordValues = null;
+
+    /** The mask for this operation, which contains a bit set for each column accessed */
     byte[] mask;
 
-    /** The ByteBuffer containing all of the data */
-    ByteBuffer buffer = null;
+    /** The ByteBuffer containing keys */
+    ByteBuffer keyBuffer = null;
+
+    /** The ByteBuffer containing values */
+    ByteBuffer valueBuffer = null;
 
     /** Blobs for this NdbRecord */
     protected NdbRecordBlobImpl[] blobs = null;
@@ -77,75 +81,85 @@ class NdbRecordOperationImpl implements
     /** Blobs that have been accessed for this operation */
     protected List<NdbRecordBlobImpl> activeBlobs = new ArrayList<NdbRecordBlobImpl>();
 
-    /** The size of the receive buffer for this operation (may be zero for non-read operations) */
-    protected int bufferSize;
+    /** The size of the key buffer for this operation */
+    protected int keyBufferSize;
+
+    /** The size of the value buffer for this operation */
+    protected int valueBufferSize;
 
-    /** The number of columns for this operation */
-    protected int numberOfColumns;
+    /** The size of the null indicator byte array */
+    protected int nullIndicatorSize;
 
+    /** The buffer manager for string encode and decode */
     protected BufferManager bufferManager;
 
     /** Constructor used for insert and delete operations that do not need to read data.
      * 
      * @param clusterTransaction the cluster transaction
-     * @param transaction the ndb transaction
-     * @param storeTable the store table
      */
-    public NdbRecordOperationImpl(ClusterTransactionImpl clusterTransaction, Table storeTable) {
-        this.ndbRecordImpl = clusterTransaction.getCachedNdbRecordImpl(storeTable);
-        this.bufferSize = ndbRecordImpl.getBufferSize();
-        this.numberOfColumns = ndbRecordImpl.getNumberOfColumns();
-        this.blobs = new NdbRecordBlobImpl[this.numberOfColumns];
+    public NdbRecordOperationImpl(ClusterTransactionImpl clusterTransaction) {
         this.clusterTransaction = clusterTransaction;
         this.bufferManager = clusterTransaction.getBufferManager();
     }
 
     public void equalBigInteger(Column storeColumn, BigInteger value) {
-        setBigInteger(storeColumn, value);
+        int columnId = ndbRecordKeys.setBigInteger(keyBuffer, storeColumn, value);
+        columnSet(columnId);
     }
 
     public void equalBoolean(Column storeColumn, boolean booleanValue) {
-        setBoolean(storeColumn, booleanValue);
+        byte value = (booleanValue?(byte)0x01:(byte)0x00);
+        equalByte(storeColumn, value);
     }
 
     public void equalByte(Column storeColumn, byte value) {
-        setByte(storeColumn, value);
+        int columnId = ndbRecordKeys.setByte(keyBuffer, storeColumn, value);
+        columnSet(columnId);
     }
 
     public void equalBytes(Column storeColumn, byte[] value) {
-        setBytes(storeColumn, value);
+        int columnId = ndbRecordKeys.setBytes(keyBuffer, storeColumn, value);
+        columnSet(columnId);
    }
 
     public void equalDecimal(Column storeColumn, BigDecimal value) {
-        setDecimal(storeColumn, value);
+        int columnId = ndbRecordKeys.setDecimal(keyBuffer, storeColumn, value);
+        columnSet(columnId);
     }
 
     public void equalDouble(Column storeColumn, double value) {
-        setDouble(storeColumn, value);
+        int columnId = ndbRecordKeys.setDouble(keyBuffer, storeColumn, value);
+        columnSet(columnId);
     }
 
     public void equalFloat(Column storeColumn, float value) {
-        setFloat(storeColumn, value);
+        int columnId = ndbRecordKeys.setFloat(keyBuffer, storeColumn, value);
+        columnSet(columnId);
     }
 
     public void equalInt(Column storeColumn, int value) {
-        setInt(storeColumn, value);
+        int columnId = ndbRecordKeys.setInt(keyBuffer, storeColumn, value);
+        columnSet(columnId);
     }
 
     public void equalShort(Column storeColumn, short value) {
-        setShort(storeColumn, value);
+        int columnId = ndbRecordKeys.setShort(keyBuffer, storeColumn, value);
+        columnSet(columnId);
     }
 
     public void equalLong(Column storeColumn, long value) {
-        setLong(storeColumn, value);
+        int columnId = ndbRecordKeys.setLong(keyBuffer, storeColumn, value);
+        columnSet(columnId);
     }
 
     public void equalString(Column storeColumn, String value) {
-        setString(storeColumn, value);
+        int columnId = ndbRecordKeys.setString(keyBuffer, bufferManager, storeColumn, value);
+        columnSet(columnId);
     }
 
     public void getBlob(Column storeColumn) {
-        throw new ClusterJFatalInternalException(local.message("ERR_Method_Not_Implemented", "getBlob"));
+        throw new ClusterJFatalInternalException(local.message("ERR_Method_Not_Implemented",
+                "NdbRecordOperationImpl.getBlob(Column)"));
     }
 
     /**
@@ -167,9 +181,11 @@ class NdbRecordOperationImpl implements
     }
 
     /** Specify the columns to be used for the operation.
+     * This is implemented by a subclass.
      */
     public void getValue(Column storeColumn) {
-        throw new ClusterJFatalInternalException(local.message("ERR_Method_Not_Implemented", "getValue"));
+        throw new ClusterJFatalInternalException(local.message("ERR_Method_Not_Implemented",
+                "NdbRecordOperationImpl.getValue(Column)"));
     }
 
     public void postExecuteCallback(Runnable callback) {
@@ -177,19 +193,23 @@ class NdbRecordOperationImpl implements
     }
 
     /** Construct a new ResultData using the saved column data and then execute the operation.
+     * This is implemented by a subclass.
      */
     public ResultData resultData() {
-        throw new ClusterJFatalInternalException(local.message("ERR_Method_Not_Implemented", "resultData"));
+        throw new ClusterJFatalInternalException(local.message("ERR_Method_Not_Implemented",
+                "NdbRecordOperationImpl.resultData()"));
     }
 
     /** Construct a new ResultData and if requested, execute the operation.
+     * This is implemented by a subclass.
      */
     public ResultData resultData(boolean execute) {
-        throw new ClusterJFatalInternalException(local.message("ERR_Method_Not_Implemented", "resultData"));
+        throw new ClusterJFatalInternalException(local.message("ERR_Method_Not_Implemented",
+                "NdbRecordOperationImpl.resultData(boolean)"));
     }
 
     public void setBigInteger(Column storeColumn, BigInteger value) {
-        int columnId = ndbRecordImpl.setBigInteger(buffer, storeColumn, value);
+        int columnId = ndbRecordValues.setBigInteger(valueBuffer, storeColumn, value);
         columnSet(columnId);
     }
 
@@ -199,52 +219,52 @@ class NdbRecordOperationImpl implements
     }
 
     public void setByte(Column storeColumn, byte value) {
-        int columnId = ndbRecordImpl.setByte(buffer, storeColumn, value);
+        int columnId = ndbRecordValues.setByte(valueBuffer, storeColumn, value);
         columnSet(columnId);
     }
 
     public void setBytes(Column storeColumn, byte[] value) {
-        int columnId = ndbRecordImpl.setBytes(buffer, storeColumn, value);
+        int columnId = ndbRecordValues.setBytes(valueBuffer, storeColumn, value);
         columnSet(columnId);
     }
 
     public void setDecimal(Column storeColumn, BigDecimal value) {
-        int columnId = ndbRecordImpl.setDecimal(buffer, storeColumn, value);
+        int columnId = ndbRecordValues.setDecimal(valueBuffer, storeColumn, value);
         columnSet(columnId);
     }
 
     public void setDouble(Column storeColumn, Double value) {
-        int columnId = ndbRecordImpl.setDouble(buffer, storeColumn, value);
+        int columnId = ndbRecordValues.setDouble(valueBuffer, storeColumn, value);
         columnSet(columnId);
     }
 
     public void setFloat(Column storeColumn, Float value) {
-        int columnId = ndbRecordImpl.setFloat(buffer, storeColumn, value);
+        int columnId = ndbRecordValues.setFloat(valueBuffer, storeColumn, value);
         columnSet(columnId);
     }
 
     public void setInt(Column storeColumn, Integer value) {
-        int columnId = ndbRecordImpl.setInt(buffer, storeColumn, value);
+        int columnId = ndbRecordValues.setInt(valueBuffer, storeColumn, value);
         columnSet(columnId);
     }
 
     public void setLong(Column storeColumn, long value) {
-        int columnId = ndbRecordImpl.setLong(buffer, storeColumn, value);
+        int columnId = ndbRecordValues.setLong(valueBuffer, storeColumn, value);
         columnSet(columnId);
     }
 
     public void setNull(Column storeColumn) {
-        int columnId = ndbRecordImpl.setNull(buffer, storeColumn);
+        int columnId = ndbRecordValues.setNull(valueBuffer, storeColumn);
         columnSet(columnId);
     }
 
     public void setShort(Column storeColumn, Short value) {
-        int columnId = ndbRecordImpl.setShort(buffer, storeColumn, value);
+        int columnId = ndbRecordValues.setShort(valueBuffer, storeColumn, value);
         columnSet(columnId);
     }
 
     public void setString(Column storeColumn, String value) {
-        int columnId = ndbRecordImpl.setString(buffer, bufferManager, storeColumn, value);
+        int columnId = ndbRecordValues.setString(valueBuffer, bufferManager, storeColumn, value);
         columnSet(columnId);
     }
 
@@ -276,28 +296,6 @@ class NdbRecordOperationImpl implements
         }
     }
 
-    public void beginDefinition() {
-        // allocate a buffer for the operation data
-        buffer = ByteBuffer.allocateDirect(bufferSize);
-        // use platform's native byte ordering
-        buffer.order(ByteOrder.nativeOrder());
-        mask = new byte[1 + (numberOfColumns/8)];
-    }
-
-    public void endDefinition() {
-        // create the insert operation
-        buffer.position(0);
-        buffer.limit(bufferSize);
-        // create the insert operation
-        ndbOperation = clusterTransaction.insertTuple(ndbRecordImpl.getNdbRecord(), buffer, mask, null);
-        // now set the NdbBlob into the blobs
-        for (NdbRecordBlobImpl blob: activeBlobs) {
-            if (blob != null) {
-                blob.setNdbBlob();
-            }
-        }
-    }
-
     public NdbBlob getNdbBlob(Column storeColumn) {
         NdbBlob result = ndbOperation.getBlobHandle(storeColumn.getColumnId());
         handleError(result, ndbOperation);
@@ -308,11 +306,15 @@ class NdbRecordOperationImpl implements
      * Set this column into the mask for NdbRecord operation.
      * @param columnId the column id
      */
-    private void columnSet(int columnId) {
+    protected void columnSet(int columnId) {
         int byteOffset = columnId / 8;
         int bitInByte = columnId - (byteOffset * 8);
         mask[byteOffset] |= NdbRecordImpl.BIT_IN_BYTE_MASK[bitInByte];
         
     }
 
+    public NdbRecordImpl getValueNdbRecord() {
+        return ndbRecordValues;
+    }
+
 }

=== added file 'storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordResultDataImpl.java'
--- a/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordResultDataImpl.java	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordResultDataImpl.java	2012-02-08 17:27:45 +0000
@@ -0,0 +1,282 @@
+/*
+   Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+ */
+
+package com.mysql.clusterj.tie;
+
+import java.math.BigDecimal;
+import java.math.BigInteger;
+
+import java.nio.ByteBuffer;
+import com.mysql.clusterj.ClusterJFatalInternalException;
+
+import com.mysql.clusterj.core.store.Blob;
+import com.mysql.clusterj.core.store.Column;
+import com.mysql.clusterj.core.store.ResultData;
+
+import com.mysql.clusterj.core.util.I18NHelper;
+import com.mysql.clusterj.core.util.Logger;
+import com.mysql.clusterj.core.util.LoggerFactoryService;
+import com.mysql.clusterj.tie.DbImpl.BufferManager;
+
+/**
+ * ResultData implementation that reads column values directly from an NdbRecord-formatted ByteBuffer.
+ */
+class NdbRecordResultDataImpl implements ResultData {
+
+    /** My message translator */
+    static final I18NHelper local = I18NHelper
+            .getInstance(NdbRecordResultDataImpl.class);
+
+    /** My logger */
+    static final Logger logger = LoggerFactoryService.getFactory()
+            .getInstance(NdbRecordResultDataImpl.class);
+
+    /** Flags for iterating a scan */
+    protected final int RESULT_READY = 0;
+    protected final int SCAN_FINISHED = 1;
+    protected final int CACHE_EMPTY = 2;
+
+    /** The NdbOperation that defines the result */
+    private NdbRecordOperationImpl operation = null;
+
+    /** The NdbRecordImpl that defines the buffer layout */
+    private NdbRecordImpl record = null;
+
+    /** The flag indicating that there are no more results */
+    private boolean nextDone;
+
+    /** The ByteBuffer containing the results */
+    private ByteBuffer buffer = null;
+
+    /** The buffer manager */
+    private BufferManager bufferManager;
+
+    /** Construct the ResultDataImpl based on an NdbRecordOperationImpl, and the 
+     * buffer manager to help with string columns.
+     * @param operation the NdbRecordOperationImpl
+     * @param bufferManager the buffer manager
+     */
+    public NdbRecordResultDataImpl(NdbRecordOperationImpl operation, NdbRecordImpl ndbRecordImpl,
+            ByteBuffer buffer, BufferManager bufferManager) {
+        this.operation = operation;
+        this.record = ndbRecordImpl;
+        this.bufferManager = bufferManager;
+        this.buffer = buffer;
+    }
+
+    public boolean next() {
+        // NdbOperation has exactly zero or one result. ScanResultDataImpl handles scans...
+        // if the ndbOperation reports an error there is no result
+        int errorCode = operation.errorCode();
+        if (errorCode != 0) {
+            setNoResult();
+        }
+        if (nextDone) {
+            return false;
+        } else {
+            nextDone = true;
+            return true;
+        }
+    }
+
+    public Blob getBlob(int columnId) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Method_Not_Implemented",
+                "NdbRecordResultDataImpl.getBlob(int)"));
+    }
+
+    public Blob getBlob(Column storeColumn) {
+        return operation.getBlobHandle(storeColumn);
+    }
+
+    public boolean getBoolean(int columnId) {
+        return record.getBoolean(buffer, columnId);
+    }
+
+    public boolean getBoolean(Column storeColumn) {
+        return record.getBoolean(buffer, storeColumn.getColumnId());
+    }
+
+    public boolean[] getBooleans(int column) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Method_Not_Implemented",
+                "NdbRecordResultDataImpl.getBooleans(int)"));
+    }
+
+    public boolean[] getBooleans(Column storeColumn) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Method_Not_Implemented",
+                "NdbRecordResultDataImpl.getBooleans(Column)"));
+    }
+
+    public byte getByte(int columnId) {
+        return record.getByte(buffer, columnId);
+    }
+
+    public byte getByte(Column storeColumn) {
+        return record.getByte(buffer, storeColumn.getColumnId());
+    }
+
+    public short getShort(int columnId) {
+        return record.getShort(buffer, columnId);
+    }
+
+    public short getShort(Column storeColumn) {
+        return record.getShort(buffer, storeColumn.getColumnId());
+     }
+
+    public int getInt(int columnId) {
+        return record.getInt(buffer, columnId);
+    }
+
+    public int getInt(Column storeColumn) {
+        return getInt(storeColumn.getColumnId());
+    }
+
+    public long getLong(int columnId) {
+        return record.getLong(buffer, columnId);
+    }
+
+    public long getLong(Column storeColumn) {
+        return getLong(storeColumn.getColumnId());
+     }
+
+    public float getFloat(int columnId) {
+        return record.getFloat(buffer, columnId);
+    }
+
+    public float getFloat(Column storeColumn) {
+        return getFloat(storeColumn.getColumnId());
+    }
+
+    public double getDouble(int columnId) {
+        return record.getDouble(buffer, columnId);
+    }
+
+    public double getDouble(Column storeColumn) {
+        return getDouble(storeColumn.getColumnId());
+    }
+
+    public String getString(int columnId) {
+        return record.getString(buffer, columnId, bufferManager);
+    }
+
+    public String getString(Column storeColumn) {
+        return record.getString(buffer, storeColumn.getColumnId(), bufferManager);
+    }
+
+    public byte[] getBytes(int column) {
+        return record.getBytes(buffer, column);
+    }
+
+    public byte[] getBytes(Column storeColumn) {
+        return record.getBytes(buffer, storeColumn);
+     }
+
+    public Object getObject(int column) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Method_Not_Implemented",
+        "NdbRecordResultDataImpl.getObject(int)"));
+    }
+
+    public Object getObject(Column storeColumn) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Method_Not_Implemented",
+        "NdbRecordResultDataImpl.getObject(Column)"));
+    }
+
+    public boolean wasNull(Column storeColumn) {
+        throw new ClusterJFatalInternalException(local.message("ERR_Method_Not_Implemented",
+        "NdbRecordResultDataImpl.wasNull(Column)"));
+    }
+
+    public Boolean getObjectBoolean(int column) {
+        return record.getObjectBoolean(buffer, column);
+    }
+
+    public Boolean getObjectBoolean(Column storeColumn) {
+        return record.getObjectBoolean(buffer, storeColumn.getColumnId());
+    }
+
+    public Byte getObjectByte(int columnId) {
+        return record.getObjectByte(buffer, columnId);
+    }
+
+    public Byte getObjectByte(Column storeColumn) {
+        return record.getObjectByte(buffer, storeColumn.getColumnId());
+    }
+
+    public Float getObjectFloat(int column) {
+        return record.getObjectFloat(buffer, column);
+    }
+
+    public Float getObjectFloat(Column storeColumn) {
+        return record.getObjectFloat(buffer, storeColumn.getColumnId());
+    }
+
+    public Double getObjectDouble(int column) {
+        return record.getObjectDouble(buffer, column);
+    }
+
+    public Double getObjectDouble(Column storeColumn) {
+        return record.getObjectDouble(buffer, storeColumn.getColumnId());
+    }
+
+    public Integer getObjectInteger(int columnId) {
+        return record.getObjectInteger(buffer, columnId);
+    }
+
+    public Integer getObjectInteger(Column storeColumn) {
+        return record.getObjectInteger(buffer, storeColumn.getColumnId());
+    }
+
+    public Long getObjectLong(int column) {
+        return record.getObjectLong(buffer, column);
+    }
+
+    public Long getObjectLong(Column storeColumn) {
+        return record.getObjectLong(buffer, storeColumn.getColumnId());
+    }
+
+    public Short getObjectShort(int columnId) {
+        return record.getObjectShort(buffer, columnId);
+    }
+
+    public Short getObjectShort(Column storeColumn) {
+        return record.getObjectShort(buffer, storeColumn.getColumnId());
+    }
+
+    public BigInteger getBigInteger(int column) {
+        return record.getBigInteger(buffer, column);
+    }
+
+    public BigInteger getBigInteger(Column storeColumn) {
+        return record.getBigInteger(buffer, storeColumn);
+    }
+
+    public BigDecimal getDecimal(int column) {
+        return record.getDecimal(buffer, column);
+    }
+
+    public BigDecimal getDecimal(Column storeColumn) {
+        return record.getDecimal(buffer, storeColumn);
+    }
+
+    public void setNoResult() {
+        nextDone = true;
+    }
+
+    public Column[] getColumns() {
+        return null;
+    }
+
+}

=== modified file 'storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/Utility.java'
--- a/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/Utility.java	2012-01-23 00:44:39 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/Utility.java	2012-02-08 17:27:45 +0000
@@ -83,6 +83,7 @@ public class Utility {
     static final int ooooffff = 0x0000ffff;
     static final int ooooffoo = 0x0000ff00;
     static final int ooffoooo = 0x00ff0000;
+    static final int ooffffff = 0x00ffffff;
 
     static final char[] SPACE_PAD = new char[255];
     static {
@@ -234,6 +235,19 @@ public class Utility {
             }
         }
 
+        public boolean getBoolean(Column storeColumn, int value) {
+            switch (storeColumn.getType()) {
+                case Bit:
+                    return value == 1;
+                case Tinyint:
+                    // the value is stored in the top 8 bits
+                    return (value >>> 24) == 1;
+                default:
+                    throw new ClusterJUserException(
+                            local.message("ERR_Unsupported_Mapping", storeColumn.getType(), "boolean"));
+            }
+        }
+
         public byte getByte(Column storeColumn, NdbRecAttr ndbRecAttr) {
             switch (storeColumn.getType()) {
                 case Bit:
@@ -246,6 +260,7 @@ public class Utility {
                             local.message("ERR_Unsupported_Mapping", storeColumn.getType(), "byte"));
             }
         }
+
         public short getShort(Column storeColumn, NdbRecAttr ndbRecAttr) {
             switch (storeColumn.getType()) {
                 case Bit:
@@ -257,6 +272,7 @@ public class Utility {
                             local.message("ERR_Unsupported_Mapping", storeColumn.getType(), "short"));
             }
         }
+
         public int getInt(Column storeColumn, NdbRecAttr ndbRecAttr) {
             switch (storeColumn.getType()) {
                 case Bit:
@@ -272,6 +288,25 @@ public class Utility {
                             local.message("ERR_Unsupported_Mapping", storeColumn.getType(), "int"));
             }
         }
+
+        public int getInt(Column storeColumn, int value) {
+            switch (storeColumn.getType()) {
+                case Bit:
+                case Int:
+                case Timestamp:
+                    return value;
+                case Date:
+                    // the unsigned value is stored in the top 3 bytes
+                    return value >>> 8;
+                case Time:
+                    // the signed value is stored in the top 3 bytes
+                    return value >> 8;
+                default:
+                    throw new ClusterJUserException(
+                            local.message("ERR_Unsupported_Mapping", storeColumn.getType(), "int"));
+            }
+        }
+
         public long getLong(Column storeColumn, NdbRecAttr ndbRecAttr) {
             switch (storeColumn.getType()) {
                 case Bit:
@@ -293,6 +328,31 @@ public class Utility {
                             local.message("ERR_Unsupported_Mapping", storeColumn.getType(), "long"));
             }
         }
+
+        public long getLong(Column storeColumn, long value) {
+            switch (storeColumn.getType()) {
+                case Bit:
+                    // the data is stored as two int values
+                    return (value >>> 32) | (value << 32);
+                case Bigint:
+                case Bigunsigned:
+                    return value;
+                case Datetime:
+                    return unpackDatetime(value);
+                case Timestamp:
+                    return (value >> 32) * 1000L;
+                case Date:
+                    // the unsigned value is stored in the top 3 bytes
+                    return unpackDate((int)(value >>> 40));
+                case Time:
+                    // the signed value is stored in the top 3 bytes
+                    return unpackTime((int)(value >> 40));
+                default:
+                    throw new ClusterJUserException(
+                            local.message("ERR_Unsupported_Mapping", storeColumn.getType(), "long"));
+            }
+        }
+
         /** Put the low order three bytes of the input value into the ByteBuffer as a medium_value.
          * The format for medium value is always little-endian even on big-endian architectures.
          * Do not flip the buffer, as the caller will do that if needed.
@@ -365,6 +425,17 @@ public class Utility {
             return result;
         }
 
+        public int convertIntValueForStorage(Column storeColumn, int value) {
+            switch (storeColumn.getType()) {
+                case Bit:
+                case Int:
+                    return value;
+                default:
+                    throw new ClusterJUserException(local.message(
+                            "ERR_Unsupported_Mapping", storeColumn.getType(), "int"));
+            }
+        }
+
         public ByteBuffer convertValue(Column storeColumn, long value) {
             ByteBuffer result = ByteBuffer.allocateDirect(8);
             return convertValue(storeColumn, value, result);
@@ -479,6 +550,12 @@ public class Utility {
             }
         }
 
+        public long convertLongValueFromStorage(Column storeColumn,
+                long fromStorage) {
+            // TODO: unimplemented for big-endian — always returns 0; port the little-endian convertLongValueFromStorage
+            return 0;
+        }
+
     }:
         /*
          * Little Endian algorithms to convert NdbRecAttr buffer into primitive types
@@ -497,6 +574,17 @@ public class Utility {
             }
         }
 
+        public boolean getBoolean(Column storeColumn, int value) {
+            switch (storeColumn.getType()) {
+                case Bit:
+                case Tinyint:
+                    return value == 1;
+                default:
+                    throw new ClusterJUserException(
+                            local.message("ERR_Unsupported_Mapping", storeColumn.getType(), "boolean"));
+            }
+        }
+
         public byte getByte(Column storeColumn, NdbRecAttr ndbRecAttr) {
             switch (storeColumn.getType()) {
                 case Bit:
@@ -508,6 +596,7 @@ public class Utility {
                             local.message("ERR_Unsupported_Mapping", storeColumn.getType(), "byte"));
             }
         }
+
         public short getShort(Column storeColumn, NdbRecAttr ndbRecAttr) {
             switch (storeColumn.getType()) {
                 case Bit:
@@ -518,6 +607,7 @@ public class Utility {
                             local.message("ERR_Unsupported_Mapping", storeColumn.getType(), "short"));
             }
         }
+
         public int getInt(Column storeColumn, NdbRecAttr ndbRecAttr) {
             switch (storeColumn.getType()) {
                 case Bit:
@@ -533,6 +623,24 @@ public class Utility {
                             local.message("ERR_Unsupported_Mapping", storeColumn.getType(), "int"));
             }
         }
+
+        public int getInt(Column storeColumn, int value) {
+            switch (storeColumn.getType()) {
+                case Bit:
+                case Int:
+                case Timestamp:
+                    return value;
+                case Date:
+                    return value & ooffffff;
+                case Time:
+                    // propagate the sign bit from 3 byte medium_int
+                    return (value << 8) >> 8;
+                default:
+                    throw new ClusterJUserException(
+                            local.message("ERR_Unsupported_Mapping", storeColumn.getType(), "int"));
+            }
+        }
+
         public long getLong(Column storeColumn, NdbRecAttr ndbRecAttr) {
             switch (storeColumn.getType()) {
                 case Bigint:
@@ -553,6 +661,26 @@ public class Utility {
             }
         }
 
+        public long getLong(Column storeColumn, long value) {
+            switch (storeColumn.getType()) {
+                case Bigint:
+                case Bigunsigned:
+                case Bit:
+                    return value;
+                case Datetime:
+                    return unpackDatetime(value);
+                case Timestamp:
+                    return value * 1000L;
+                case Date:
+                    return unpackDate((int)value);
+                case Time:
+                    return unpackTime((int)value);
+                default:
+                    throw new ClusterJUserException(
+                            local.message("ERR_Unsupported_Mapping", storeColumn.getType(), "long"));
+            }
+        }
+
         /** Put the low order three bytes of the input value into the ByteBuffer as a medium_value.
          * The format for medium value is always little-endian even on big-endian architectures.
          * Do not flip the buffer, as the caller will do that if needed.
@@ -613,7 +741,18 @@ public class Utility {
                     return result;
                 default:
                     throw new ClusterJUserException(local.message(
-                            "ERR_Unsupported_Mapping", storeColumn.getType(), "short"));
+                            "ERR_Unsupported_Mapping", storeColumn.getType(), "int"));
+            }
+        }
+
+        public int convertIntValueForStorage(Column storeColumn, int value) {
+            switch (storeColumn.getType()) {
+                case Bit:
+                case Int:
+                    return value;
+                default:
+                    throw new ClusterJUserException(local.message(
+                            "ERR_Unsupported_Mapping", storeColumn.getType(), "int"));
             }
         }
 
@@ -702,6 +841,26 @@ public class Utility {
             }
         }
 
+        public long convertLongValueFromStorage(Column storeColumn, long fromStorage) {
+            switch (storeColumn.getType()) {
+                case Bigint:
+                case Bigunsigned:
+                case Bit:
+                    return fromStorage;
+                case Datetime:
+                    return unpackDatetime(fromStorage);
+                case Timestamp:
+                    return fromStorage * 1000L;
+                case Date:
+                    return unpackDate((int)fromStorage);
+                case Time:
+                    return unpackTime((int)fromStorage);
+                default:
+                    throw new ClusterJUserException(
+                            local.message("ERR_Unsupported_Mapping", storeColumn.getType(), "long"));
+            }
+        }
+
     };
 
     /* Error codes that are not severe, and simply reflect expected conditions */
@@ -715,16 +874,21 @@ public class Utility {
 
     protected static interface EndianManager {
         public void put3byteInt(ByteBuffer byteBuffer, int value);
+        public int getInt(Column storeColumn, int value);
         public int getInt(Column storeColumn, NdbRecAttr ndbRecAttr);
         public short getShort(Column storeColumn, NdbRecAttr ndbRecAttr);
         public long getLong(Column storeColumn, NdbRecAttr ndbRecAttr);
+        public long getLong(Column storeColumn, long value);
         public byte getByte(Column storeColumn, NdbRecAttr ndbRecAttr);
         public ByteBuffer convertValue(Column storeColumn, byte value);
         public ByteBuffer convertValue(Column storeColumn, short value);
         public ByteBuffer convertValue(Column storeColumn, int value);
         public ByteBuffer convertValue(Column storeColumn, long value);
         public boolean getBoolean(Column storeColumn, NdbRecAttr ndbRecAttr);
+        public boolean getBoolean(Column storeColumn, int value);
+        public int convertIntValueForStorage(Column storeColumn, int value);
         public long convertLongValueForStorage(Column storeColumn, long value);
+        public long convertLongValueFromStorage(Column storeColumn, long fromStorage);
         public int convertByteValueForStorage(Column storeColumn, byte value);
         public int convertShortValueForStorage(Column storeColumn, short value);
     }
@@ -1823,6 +1987,10 @@ public class Utility {
         return endianManager.getBoolean(storeColumn, ndbRecAttr);
     }
 
+    public static boolean getBoolean(Column storeColumn, int value) {
+        return endianManager.getBoolean(storeColumn, value);
+    }
+
     /** Get a byte from this ndbRecAttr. 
      * 
      * @param storeColumn the Column
@@ -1853,6 +2021,10 @@ public class Utility {
         return endianManager.getInt(storeColumn, ndbRecAttr);
     }
 
+    public static int getInt(Column storeColumn, int value) {
+        return endianManager.getInt(storeColumn, value);
+    }
+
     /** Get a long from this ndbRecAttr. 
      * 
      * @param storeColumn the Column
@@ -1863,6 +2035,10 @@ public class Utility {
         return endianManager.getLong(storeColumn, ndbRecAttr);
     }
 
+    public static long getLong(Column storeColumn, long value) {
+        return endianManager.getLong(storeColumn, value);
+    }
+
     /** Convert a long value into a long for storage. The value parameter
      * may be a date (milliseconds since the epoch), a bit array, or simply a long value.
      * The storage format depends on the type of the column and the endian-ness of 
@@ -1900,4 +2076,8 @@ public class Utility {
         return endianManager.convertShortValueForStorage(storeColumn, value);
     }
 
+    public static int convertIntValueForStorage(Column storeColumn, int value) {
+        return endianManager.convertIntValueForStorage(storeColumn, value);
+    }
+
 }

=== modified file 'storage/ndb/clusterj/clusterj-tie/src/test/java/testsuite/clusterj/tie/StressTest.java'
--- a/storage/ndb/clusterj/clusterj-tie/src/test/java/testsuite/clusterj/tie/StressTest.java	2012-01-21 02:22:20 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/src/test/java/testsuite/clusterj/tie/StressTest.java	2012-02-08 17:27:45 +0000
@@ -17,9 +17,6 @@
 
 package testsuite.clusterj.tie;
 
-import org.junit.Ignore;
-
-@Ignore
 public class StressTest extends testsuite.clusterj.StressTest {
 
 }

No bundle (reason: useless for push emails).
Thread
bzr push into mysql-5.5-cluster-7.2 branch (jonas.oreland:3807 to 3808) — jonas oreland, 10 Feb