List: Commits « Previous Message | Next Message »
From: Frazer Clement  Date: May 31 2012 3:25pm
Subject:bzr push into mysql-5.5-cluster-7.2 branch (frazer.clement:3927 to 3929)
Bug#14083116
View as plain text  
 3929 Frazer Clement	2012-05-31
      Bug#14083116 (Bad error handling in LQH corrupts transid hash)
      
      Testcase verifying fix.

    modified:
      mysql-test/suite/ndb/r/ndb_join_pushdown_default.result
      mysql-test/suite/ndb/t/ndb_join_pushdown.inc
 3928 Frazer Clement	2012-05-31 [merge]
      Merge 7.1-7.2

    added:
      storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/QueryOrderingTest.java
      storage/ndb/clusterj/clusterj-tie/src/test/java/testsuite/clusterj/tie/QueryOrderingTest.java
    modified:
      storage/ndb/clusterj/clusterj-api/src/main/java/com/mysql/clusterj/Query.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/CandidateIndexImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/InPredicateImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/PredicateImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryDomainTypeImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryExecutionContextImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/store/ScanOperation.java
      storage/ndb/clusterj/clusterj-core/src/main/resources/com/mysql/clusterj/core/Bundle.properties
      storage/ndb/clusterj/clusterj-jdbc/src/main/java/com/mysql/clusterj/jdbc/SQLExecutor.java
      storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/AbstractQueryTest.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordIndexScanOperationImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordScanOperationImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/ScanOperationImpl.java
      storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
      storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
 3927 Frazer Clement	2012-05-29 [merge]
      Merge 7.1->7.2

    added:
      storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/QueryLimitsTest.java
      storage/ndb/clusterj/clusterj-tie/src/test/java/testsuite/clusterj/tie/QueryLimitsTest.java
    modified:
      storage/ndb/clusterj/clusterj-api/src/main/java/com/mysql/clusterj/Query.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryDomainTypeImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryExecutionContextImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/store/ScanOperation.java
      storage/ndb/clusterj/clusterj-core/src/main/resources/com/mysql/clusterj/core/Bundle.properties
      storage/ndb/clusterj/clusterj-jdbc/src/main/java/com/mysql/clusterj/jdbc/SQLExecutor.java
      storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/AbstractQueryTest.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/ClusterTransactionImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/DbImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordScanOperationImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordScanResultDataImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/ScanOperationImpl.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/ScanResultDataImpl.java
      storage/ndb/include/mgmapi/ndbd_exit_codes.h
      storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
      storage/ndb/src/kernel/error/ndbd_exit_codes.c
=== modified file 'mysql-test/suite/ndb/r/ndb_join_pushdown_default.result'
--- a/mysql-test/suite/ndb/r/ndb_join_pushdown_default.result	2012-05-08 12:56:51 +0000
+++ b/mysql-test/suite/ndb/r/ndb_join_pushdown_default.result	2012-05-31 15:19:49 +0000
@@ -5709,6 +5709,16 @@ count(*)
 243
 set global debug=@save_debug;
 drop table t;
+create table t1 (a int primary key, b int, c int, index(b,c)) engine = ndb;
+insert into t1 values (4,null, 2);
+explain
+select x.a from t1 as x join t1 as y on y.a = x.b where x.a=4;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	x	eq_ref	PRIMARY,b	PRIMARY	4	const	1	Parent of 2 pushed join@1
+1	SIMPLE	y	eq_ref	PRIMARY	PRIMARY	4	test.x.b	1	Child of 'x' in pushed join@1
+select x.a from t1 as x join t1 as y on y.a = x.b where x.a=4;
+a
+drop table t1;
 create temporary table spj_counts_at_end
 select counter_name, sum(val) as val 
 from ndbinfo.counters 
@@ -5730,7 +5740,7 @@ CONST_PRUNED_RANGE_SCANS_RECEIVED	8
 LOCAL_TABLE_SCANS_SENT	254
 PRUNED_RANGE_SCANS_RECEIVED	27
 RANGE_SCANS_RECEIVED	738
-READS_RECEIVED	47
+READS_RECEIVED	48
 TABLE_SCANS_RECEIVED	254
 drop table spj_counts_at_startup;
 drop table spj_counts_at_end;

=== modified file 'mysql-test/suite/ndb/t/ndb_join_pushdown.inc'
--- a/mysql-test/suite/ndb/t/ndb_join_pushdown.inc	2012-05-08 12:56:51 +0000
+++ b/mysql-test/suite/ndb/t/ndb_join_pushdown.inc	2012-05-31 15:19:49 +0000
@@ -4184,6 +4184,18 @@ set global debug=@save_debug;
 
 drop table t;
 
+####################################################
+# Test LQH handling of zero-length key in LQHKEYREQ
+# Bug#14083116
+create table t1 (a int primary key, b int, c int, index(b,c)) engine = ndb;
+insert into t1 values (4,null, 2);
+
+explain
+select x.a from t1 as x join t1 as y on y.a = x.b where x.a=4;
+select x.a from t1 as x join t1 as y on y.a = x.b where x.a=4;
+
+drop table t1; 
+
 ########################################
 # Verify DBSPJ counters for entire test:
 # Note: These tables are 'temporary' within 'connection spj'

=== modified file 'storage/ndb/clusterj/clusterj-api/src/main/java/com/mysql/clusterj/Query.java'
--- a/storage/ndb/clusterj/clusterj-api/src/main/java/com/mysql/clusterj/Query.java	2012-05-23 23:51:17 +0000
+++ b/storage/ndb/clusterj/clusterj-api/src/main/java/com/mysql/clusterj/Query.java	2012-05-31 00:47:21 +0000
@@ -1,5 +1,5 @@
 /*
-   Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
+   Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -44,6 +44,12 @@ public interface Query<E> {
     /** The query explain index used key */
     static final String INDEX_USED = "IndexUsed";
 
+    /** Ordering */
+    static enum Ordering {
+        ASCENDING, 
+        DESCENDING;
+    };
+
     /** Set the value of a parameter. If called multiple times for the same
      * parameter, silently replace the value.
      * @param parameterName the name of the parameter
@@ -119,4 +125,26 @@ public interface Query<E> {
      */
     void setLimits (long skip, long limit);
 
+    /** Set ordering for the results of this query. The execution of the query
+     * is modified to use an index previously defined.
+     * <ul><li>There must be an index defined on the columns mapped to
+     * the ordering fields, in the order of the ordering fields.
+     * </li><li>There must be no gaps in the ordering fields relative to the index.
+     * </li><li>All ordering fields must be in the index, but not all
+     * fields in the index need be in the ordering fields.
+     * </li><li>If an "in" predicate is used in the filter on a field in the ordering,
+     * it can only be used with the first field.
+     * </li><li>If any of these conditions is violated, ClusterJUserException is
+     * thrown when the query is executed.
+     * </li></ul>
+     * If an "in" predicate is used, each element in the parameter
+     * defines a separate range, and ordering is performed within that range.
+     * There may be a better (more efficient) index based on the filter,
+     * but specifying the ordering will force the query to use an index
+     * that contains the ordering fields.
+     * @param ordering either Ordering.ASCENDING or Ordering.DESCENDING
+     * @param orderingFields the fields to order by
+     */
+    void setOrdering(Ordering ordering, String... orderingFields);
+
 }

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/CandidateIndexImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/CandidateIndexImpl.java	2011-10-18 22:54:36 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/CandidateIndexImpl.java	2012-05-31 00:47:21 +0000
@@ -1,5 +1,5 @@
 /*
-   Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
+   Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -64,6 +64,7 @@ public final class CandidateIndexImpl {
     private ScanType scanType = PredicateImpl.ScanType.TABLE_SCAN;
     private int fieldScore = 1;
     protected int score = 0;
+    private boolean canBound = true;
 
     public CandidateIndexImpl(
             String className, Index storeIndex, boolean unique, AbstractDomainFieldHandlerImpl[] fields) {
@@ -202,6 +203,7 @@ public final class CandidateIndexImpl {
             // range index
             // leading columns need any kind of bound
             // extra credit for equals
+            boolean firstColumn = true;
             for (CandidateColumnImpl candidateColumn: candidateColumns) {
                 if ((candidateColumn.equalBound)) {
                     scanType = PredicateImpl.ScanType.INDEX_SCAN;
@@ -215,7 +217,9 @@ public final class CandidateIndexImpl {
                     }
                 } else if ((candidateColumn.inBound)) {
                     scanType = PredicateImpl.ScanType.INDEX_SCAN;
-                    multiRange = true;
+                    if (firstColumn) {
+                        multiRange = true;
+                    }
                     if (!lowerBoundDone) {
                         score += fieldScore;
                         lastLowerBoundColumn = candidateColumn;
@@ -252,6 +256,7 @@ public final class CandidateIndexImpl {
                         continue;
                     }
                 }
+                firstColumn = false;
             }
             if (lastLowerBoundColumn != null) {
                 lastLowerBoundColumn.markLastLowerBoundColumn();
@@ -432,6 +437,12 @@ public final class CandidateIndexImpl {
          */
         private int operationSetBounds(
                 QueryExecutionContext context, IndexScanOperation op, int index, int boundStatus) {
+            if (inPredicate != null && index == -1
+                    || !canBound) {
+                // "in" predicate cannot be used to set bounds unless it is the first column in the index
+                // if index scan but no valid bounds to set skip bounds
+                return BOUND_STATUS_BOTH_BOUNDS_DONE;
+            }
 
             int boundSet = PredicateImpl.NO_BOUND_SET;
 
@@ -534,19 +545,90 @@ public final class CandidateIndexImpl {
 
     /** Is this index usable in the current context?
      * If a primary or unique index, all parameters must be non-null.
-     * If a btree index, the parameter for the first comparison must be non-null
+     * If a btree index, the parameter for the first comparison must be non-null.
+     * If ordering is specified, the ordering fields must appear in the proper position in the index.
+     * <ul><li>Returns -1 if this index is unusable.
+     * </li><li>Returns 0 if this index is usable but has no filtering terms
+     * </li><li>Returns 1 if this index is usable and has at least one usable filtering term
+     * </li></ul>
      * @param context the query execution context
-     * @return true if all relevant parameters in the context are non-null
+     * @param orderingFields the fields in the ordering
+     * @return the usability of this index
      */
-    public boolean isUsable(QueryExecutionContext context) {
-        if (unique) {
-            return context.hasNoNullParameters();
+    public int isUsable(QueryExecutionContext context, String[] orderingFields) {
+        boolean ordering = orderingFields != null;
+        if (ordering && !containsAllOrderingFields(orderingFields)) {
+            return -1;
+        }
+                
+        // ordering is ok; unique indexes have to have no null parameters
+        if (unique && score > 0) {
+            return context.hasNoNullParameters()?1:-1;
         } else {
-            // the first parameter must not be null
-            CandidateColumnImpl candidateColumn = candidateColumns[0];
-            PredicateImpl predicate = candidateColumn.predicate;
-            return predicate.isUsable(context);
+            // index scan; the first parameter must not be null
+            if (candidateColumns == null) {
+                // this is a dummy index for "no where clause"
+                canBound = false;
+            } else {
+                CandidateColumnImpl candidateColumn = candidateColumns[0];
+                PredicateImpl predicate = candidateColumn.predicate;
+                canBound = predicate != null && predicate.isUsable(context);
+            }
+            // if first parameter is null, can scan but not bound
+            if (canBound) {
+                if (logger.isDebugEnabled()) logger.debug("for " + indexName + " canBound true -> returns 1");
+                scanType = PredicateImpl.ScanType.INDEX_SCAN;
+                return 1;
+            } else {
+                if (ordering) {
+                    if (logger.isDebugEnabled()) logger.debug("for " + indexName + " canBound false -> returns 0");
+                    scanType = PredicateImpl.ScanType.INDEX_SCAN;
+                    return 0;
+                } else {
+                    if (logger.isDebugEnabled()) logger.debug("for " + indexName + " canBound false -> returns -1");
+                    return -1;
+                }
+            }
+        }
+    }
+
+    /** Does this index contain all ordering fields?
+     * 
+     * @param orderingFields the ordering fields
+     * @return true if this ordered index contains all ordering fields in the proper position with no gaps
+     */
+    public boolean containsAllOrderingFields(String[] orderingFields) {
+        if (isUnique()) {
+            return false;
+        }
+        int candidateColumnIndex = 0;
+        if (orderingFields != null) {
+            for (String orderingField: orderingFields) {
+                if (candidateColumnIndex >= candidateColumns.length) {
+                    // too many columns in orderingFields for this index
+                    if (logger.isDebugEnabled()) logger.debug("Index " + indexName + " cannot be used because "
+                            + orderingField + " is not part of this index.");
+                    return false;
+                }
+                // each ordering field must correspond in order to the index fields
+                CandidateColumnImpl candidateColumn = candidateColumns[candidateColumnIndex++];
+                if (!orderingField.equals(candidateColumn.domainFieldHandler.getName())) {
+                    // the ordering field is not in the proper position in this candidate index
+                    if (logger.isDebugEnabled()) {
+                        logger.debug("Index " + indexName + " cannot be used because CandidateColumn "
+                            + candidateColumn.domainFieldHandler.getName() + " does not match " + orderingField);
+                    }
+                    return false;
+                }
+            }
+            if (logger.isDebugEnabled()) {
+                logger.debug("CandidateIndexImpl.containsAllOrderingFields found possible index (unique: "
+                        + unique + ") " + indexName);
+            }
+            scanType = PredicateImpl.ScanType.INDEX_SCAN;
+            return true;
         }
+        return false;
     }
 
 }

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/InPredicateImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/InPredicateImpl.java	2011-10-18 22:54:36 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/InPredicateImpl.java	2012-05-31 00:47:21 +0000
@@ -1,5 +1,5 @@
 /*
-   Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
+   Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -196,7 +196,7 @@ public class InPredicateImpl extends Pre
                 for (Object value: iterable) {
                     property.filterCmpValue(value, BinaryCondition.COND_EQ, filter);
                 }
-            } else if (parameterValue.getClass().isArray()) {
+            } else if (Object[].class.isAssignableFrom(parameterValue.getClass())) {
                 Object[] parameterArray = (Object[])parameterValue;
                 for (Object value: parameterArray) {
                     property.filterCmpValue(value, BinaryCondition.COND_EQ, filter);

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/PredicateImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/PredicateImpl.java	2011-11-22 22:01:23 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/PredicateImpl.java	2012-05-31 00:47:21 +0000
@@ -1,5 +1,5 @@
 /*
-   Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
+   Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -208,42 +208,55 @@ public abstract class PredicateImpl impl
         return dobj;
     }
 
-    public CandidateIndexImpl getBestCandidateIndex(QueryExecutionContext context) {
-        return getBestCandidateIndexFor(context, getTopLevelPredicates());
+    public CandidateIndexImpl getBestCandidateIndex(QueryExecutionContext context, String[] orderingFields) {
+        return getBestCandidateIndexFor(context, getTopLevelPredicates(), orderingFields);
     }
 
     /** Get the best candidate index for the query, considering all indices
-     * defined and all predicates in the query. If a unique index is usable
-     * (no non-null parameters) then return it. Otherwise, simply choose the
-     * first index for which there is at least one leading non-null parameter.
+     * defined, ordering fields, and all predicates in the query. If a unique index is usable
+     * (no non-null parameters), then return it (ordering is not relevant for a single result).
+     * Otherwise, choose the first index which includes the ordering fields and for which there
+     * is at least one leading non-null parameter. If there are ordering fields and an index
+     * containing those fields, the index might be used as a last resort in case no better index can be found.
+     * @param context the query execution context
      * @param predicates the predicates
+     * @param orderingFields the ordering fields
      * @return the best index for the query
      */
     protected CandidateIndexImpl getBestCandidateIndexFor(QueryExecutionContext context,
-            PredicateImpl... predicates) {
+            PredicateImpl[] predicates, String[] orderingFields) {
         // if there is a primary/unique index, see if it can be used in the current context
-        if (uniqueIndex != null && uniqueIndex.isUsable(context)) {
+        if (uniqueIndex != null && uniqueIndex.isUsable(context, null) > 0) {
             if (logger.isDebugEnabled()) logger.debug("usable unique index: " + uniqueIndex.getIndexName());
             return uniqueIndex;
         }
         // find the best candidate index by returning the highest scoring index that is usable
-        // in the current context; i.e. has non-null parameters
-        // TODO: it might be better to score indexes again considering the current context
+        // in the current context; i.e. satisfies all ordering fields and has non-null parameters
+        // the scored candidate indices are already ordered by the number of query terms
+        CandidateIndexImpl lastResort = null;
         for (CandidateIndexImpl index: scoredCandidateIndices) {
-            if (index.isUsable(context)) {
-            if (logger.isDebugEnabled()) logger.debug("usable ordered index: " + index.getIndexName());
+            int usability = index.isUsable(context, orderingFields);
+            if (logger.isDebugEnabled()) logger.debug("index " + index.getIndexName() + " usability: " + usability);
+            if (usability > 0) {
                 return index;
+            } else if (usability == 0) {
+                if (!index.isUnique()) {
+                    if (logger.isDebugEnabled()) logger.debug("last resort: " + lastResort.getIndexName());
+                    // save this index; we might have to use it as a last resort
+                    lastResort = index;
+                }
             }
         }
         // there is no index that is usable in the current context
-        return CandidateIndexImpl.getIndexForNullWhereClause();
+        // use the last resort if there is one and there are ordering fields
+        return (lastResort!=null && orderingFields!=null)?lastResort:CandidateIndexImpl.getIndexForNullWhereClause();
 
     }
 
     /** Get the number of conditions in the top level predicate.
-     * This is used to determine whether a hash index can be used. If there
+     * This is used to determine whether a unique index can be used. If there
      * are exactly the number of conditions as index columns, then the
-     * hash index might be used.
+     * unique index might be used.
      * By default (for equal, greaterThan, lessThan, greaterEqual, lessEqual)
      * there is one condition.
      * AndPredicateImpl overrides this method.
@@ -266,26 +279,24 @@ public abstract class PredicateImpl impl
             predicateImpl.markBoundsForCandidateIndices(candidateIndices);
         }
         // Iterate over candidate indices to find those that are usable.
-        // Hash index operations require the predicates to have no extra conditions
-        // beyond the index columns.
+        // Unique index operations require the predicates to have no extra conditions
+        // beyond the index columns because key operations cannot have filters.
         // Btree index operations are ranked by the number of usable conditions
         int numberOfConditions = getNumberOfConditionsInPredicate();
         for (CandidateIndexImpl candidateIndex : candidateIndices) {
+            
             if (candidateIndex.supportsConditionsOfLength(numberOfConditions)) {
                 candidateIndex.score();
                 int score = candidateIndex.getScore();
-                if (score != 0) {
-                    if (candidateIndex.isUnique()) {
-                        // there can be only one unique index for a given predicate
-                        uniqueIndex = candidateIndex;
-                    } else {
-                        // add possible indices to ordered map
-                        scoredCandidateIndices.add(candidateIndex);
-                    }
-                }
-                if (logger.isDetailEnabled()) {
-                    logger.detail("Score: " + score + " from " + candidateIndex.getIndexName());
+                
+                if (score != 0 && candidateIndex.isUnique()) {
+                    // there can be only one unique index for a given predicate
+                    uniqueIndex = candidateIndex;
+                } else {
+                    // add possible indices to ordered map
+                    scoredCandidateIndices.add(candidateIndex);
                 }
+            if (logger.isDetailEnabled()) logger.detail("Score: " + score + " from " + candidateIndex.getIndexName());
             }
         }
     }

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryDomainTypeImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryDomainTypeImpl.java	2012-05-29 17:15:08 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryDomainTypeImpl.java	2012-05-31 15:07:44 +0000
@@ -22,6 +22,7 @@ import com.mysql.clusterj.ClusterJFatalI
 import com.mysql.clusterj.ClusterJUserException;
 import com.mysql.clusterj.Query;
 
+import com.mysql.clusterj.Query.Ordering;
 import com.mysql.clusterj.core.query.PredicateImpl.ScanType;
 import com.mysql.clusterj.core.spi.DomainFieldHandler;
 import com.mysql.clusterj.core.spi.DomainTypeHandler;
@@ -79,6 +80,15 @@ public class QueryDomainTypeImpl<T> impl
     protected Map<String, PropertyImpl> properties =
             new HashMap<String, PropertyImpl>();
 
+    /** My index for this query */
+    CandidateIndexImpl index = null;
+
+    /** My ordering fields for this query */
+    String[] orderingFields = null;
+
+    /** My ordering for this query */
+    Ordering ordering = null;
+
     public QueryDomainTypeImpl(DomainTypeHandler<T> domainTypeHandler, Class<T> cls) {
         this.cls = cls;
         this.domainTypeHandler = domainTypeHandler;
@@ -153,10 +163,13 @@ public class QueryDomainTypeImpl<T> impl
     /** Query.getResultList delegates to this method.
      * @param skip the number of rows to skip
      * @param limit the limit of rows to return after skipping
+     * @param orderingFields 
+     * @param ordering 
      * 
      * @return the results of executing the query
      */
-    public List<T> getResultList(QueryExecutionContext context, long skip, long limit) {
+    public List<T> getResultList(QueryExecutionContext context, long skip, long limit,
+            Ordering ordering, String[] orderingFields) {
         assertAllParametersBound(context);
 
         SessionSPI session = context.getSession();
@@ -165,7 +178,7 @@ public class QueryDomainTypeImpl<T> impl
         List<T> resultList = new ArrayList<T>();
         try {
             // execute the query
-            ResultData resultData = getResultData(context, skip, limit);
+            ResultData resultData = getResultData(context, skip, limit, ordering, orderingFields);
             // put the result data into the result list
             while (resultData.next()) {
                 T row = session.newInstance(resultData, domainTypeHandler);
@@ -190,18 +203,22 @@ public class QueryDomainTypeImpl<T> impl
      * @param context the query context, including the bound parameters
      * @param skip the number of rows to skip
      * @param limit the limit of rows to return after skipping
+     * @param orderingFields 
+     * @param ordering 
      * @return the raw result data from the query
      * @throws ClusterJUserException if not all parameters are bound
      */
-    public ResultData getResultData(QueryExecutionContext context, long skip, long limit) {
+    public ResultData getResultData(QueryExecutionContext context, long skip, long limit,
+            Ordering ordering, String[] orderingFields) {
         SessionSPI session = context.getSession();
+        this.ordering = ordering;
+        this.orderingFields = orderingFields;
         // execute query based on what kind of scan is needed
         // if no where clause, scan the entire table
-        CandidateIndexImpl index = where==null?
-                CandidateIndexImpl.getIndexForNullWhereClause():
-                where.getBestCandidateIndex(context);
-
+        index = getCandidateIndex(context);
         ScanType scanType = index.getScanType();
+
+        if (logger.isDebugEnabled()) logger.debug("using index " + index.getIndexName() + " with scanType " + scanType);
         Map<String, Object> explain = newExplain(index, scanType);
         context.setExplain(explain);
         ResultData result = null;
@@ -216,7 +233,6 @@ public class QueryDomainTypeImpl<T> impl
                     if (skip > 0 || limit < 1) {
                         return resultDataEmpty;
                     }
-                    if (logger.isDetailEnabled()) logger.detail("Using primary key find for query.");
                     // perform a select operation
                     op = session.getSelectOperation(domainTypeHandler.getStoreTable());
                     op.beginDefinition();
@@ -232,7 +248,6 @@ public class QueryDomainTypeImpl<T> impl
 
                 case INDEX_SCAN: {
                     storeIndex = index.getStoreIndex();
-                    if (logger.isDetailEnabled()) logger.detail("Using index scan with ordered index " + index.getIndexName() + " for query.");
                     // perform an index scan operation
                     if (index.isMultiRange()) {
                         op = session.getIndexScanOperationMultiRange(storeIndex, domainTypeHandler.getStoreTable());
@@ -242,12 +257,15 @@ public class QueryDomainTypeImpl<T> impl
                         
                     }
                     op.beginDefinition();
+                    ((ScanOperation)op).setOrdering(ordering);
                     // set the expected columns into the operation
                     domainTypeHandler.operationGetValues(op);
                     // set the bounds into the operation
                     index.operationSetBounds(context, (IndexScanOperation)op);
                     // set additional filter conditions
-                    where.filterCmpValue(context, (IndexScanOperation)op);
+                    if (where != null) {
+                        where.filterCmpValue(context, (IndexScanOperation)op);
+                    }
                     op.endDefinition();
                     // execute the scan and get results
                     result = ((ScanOperation)op).resultData(true, skip, limit);
@@ -255,7 +273,9 @@ public class QueryDomainTypeImpl<T> impl
                 }
 
                 case TABLE_SCAN: {
-                    if (logger.isDetailEnabled()) logger.detail("Using table scan for query.");
+                    if (ordering != null) {
+                        throw new ClusterJUserException(local.message("ERR_Cannot_Use_Ordering_With_Table_Scan"));
+                    }
                     // perform a table scan operation
                     op = session.getTableScanOperation(domainTypeHandler.getStoreTable());
                     op.beginDefinition();
@@ -277,7 +297,6 @@ public class QueryDomainTypeImpl<T> impl
                         return resultDataEmpty;
                     }
                     storeIndex = index.getStoreIndex();
-                    if (logger.isDetailEnabled()) logger.detail("Using lookup with unique index " + index.getIndexName() + " for query.");
                     // perform a unique lookup operation
                     op = session.getUniqueIndexOperation(storeIndex, domainTypeHandler.getStoreTable());
                     op.beginDefinition();
@@ -328,9 +347,7 @@ public class QueryDomainTypeImpl<T> impl
         SessionSPI session = context.getSession();
         // calculate what kind of scan is needed
         // if no where clause, scan the entire table
-        CandidateIndexImpl index = where==null?
-            CandidateIndexImpl.getIndexForNullWhereClause():
-            where.getBestCandidateIndex(context);
+        index = getCandidateIndex(context);
         ScanType scanType = index.getScanType();
         Map<String, Object> explain = newExplain(index, scanType);
         context.setExplain(explain);
@@ -339,7 +356,6 @@ public class QueryDomainTypeImpl<T> impl
         Index storeIndex;
         session.startAutoTransaction();
         Operation op = null;
-
         try {
             switch (scanType) {
 
@@ -444,9 +460,7 @@ public class QueryDomainTypeImpl<T> impl
         SessionSPI session = context.getSession();
         // calculate what kind of scan is needed
         // if no where clause, scan the entire table
-        CandidateIndexImpl index = where==null?
-            CandidateIndexImpl.getIndexForNullWhereClause():
-            where.getBestCandidateIndex(context);
+        index = getCandidateIndex(context);
         ScanType scanType = index.getScanType();
         Map<String, Object> explain = newExplain(index, scanType);
         context.setExplain(explain);
@@ -556,14 +570,32 @@ public class QueryDomainTypeImpl<T> impl
      */
     public void explain(QueryExecutionContext context) {
         assertAllParametersBound(context);
-        CandidateIndexImpl index = where==null?
-                CandidateIndexImpl.getIndexForNullWhereClause():
-                where.getBestCandidateIndex(context);
+        CandidateIndexImpl index = getCandidateIndex(context);
         ScanType scanType = index.getScanType();
         Map<String, Object> explain = newExplain(index, scanType);
         context.setExplain(explain);
     }
 
+    private CandidateIndexImpl getCandidateIndex(QueryExecutionContext context) {
+        if (where == null) {
+            // there is no filter, so without ordering this is a table scan
+            // with ordering, choose an index that contains all ordering fields
+            CandidateIndexImpl[] candidateIndexImpls = domainTypeHandler.createCandidateIndexes();
+            for (CandidateIndexImpl candidateIndexImpl: candidateIndexImpls) {
+                // choose the first index that contains all ordering fields
+                if (candidateIndexImpl.containsAllOrderingFields(orderingFields)) {
+                    index = candidateIndexImpl;
+                    return index;
+                }
+            }
+            index = CandidateIndexImpl.getIndexForNullWhereClause();
+        } else {
+            // there is a filter; choose the best index that contains all ordering fields
+            index = where.getBestCandidateIndex(context, orderingFields);
+        }
+        return index;
+    }
+
     /** Create a new explain for this query.
      * @param index the index used
      * @param scanType the scan type

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryExecutionContextImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryExecutionContextImpl.java	2012-05-29 17:15:08 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryExecutionContextImpl.java	2012-05-31 15:07:44 +0000
@@ -1,5 +1,5 @@
 /*
-   Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+   Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -79,6 +79,7 @@ public class QueryExecutionContextImpl i
      */
     protected QueryExecutionContextImpl(QueryExecutionContextImpl context) {
         this.session = context.getSession();
+        this.explain = context.getExplain();
         boundParameters = new HashMap<String, Object>(context.boundParameters);
     }
 
@@ -102,6 +103,8 @@ public class QueryExecutionContextImpl i
                     local.message("ERR_Parameter_Null"));
         }
         boundParameters.put(parameterName, value);
+        // if any parameters changed, the explain is no longer valid
+        this.explain = null;
     }
     /** Get the value of a parameter by name.
      */
@@ -128,7 +131,7 @@ public class QueryExecutionContextImpl i
 
     public ResultData getResultData(QueryDomainType<?> queryDomainType) {
         // TODO handle skip and limit
-        return ((QueryDomainTypeImpl<?>)queryDomainType).getResultData(this, 0, Long.MAX_VALUE);
+        return ((QueryDomainTypeImpl<?>)queryDomainType).getResultData(this, 0, Long.MAX_VALUE, null, null);
     }
 
     /** Add a filter to the list of filters created for this query.

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryImpl.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryImpl.java	2012-05-23 23:51:17 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/QueryImpl.java	2012-05-31 00:47:21 +0000
@@ -1,5 +1,5 @@
 /*
-   Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
+   Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -52,6 +52,12 @@ public class QueryImpl<E> implements Que
     /** The limit */
     protected long limit = Long.MAX_VALUE;
 
+    /** The order */
+    protected Query.Ordering ordering = null;
+
+    /** The ordering fields */
+    protected String[] orderingFields = null;
+
     public QueryImpl(SessionImpl session, QueryDomainTypeImpl<E> dobj) {
         this.session = session;
         context = new QueryExecutionContextImpl(session);
@@ -89,6 +95,32 @@ public class QueryImpl<E> implements Que
         }
     }
 
+    /** Set ordering for this query. Verify that the ordering fields exist in the domain type.
+     * @param ordering the ordering for the query
+     * @param orderingFields the list of fields to order by
+     */
+    public void setOrdering(com.mysql.clusterj.Query.Ordering ordering,
+            String... orderingFields) {
+        this.ordering = ordering;
+        this.orderingFields = orderingFields;
+        // verify that all ordering fields actually are fields
+        StringBuilder builder = new StringBuilder();
+        String separator = "";
+        for (String orderingField : orderingFields) {
+            try {
+                dobj.get(orderingField);
+            } catch (ClusterJUserException ex) {
+                builder.append(separator);
+                builder.append(orderingField);
+                separator = ", ";
+            }
+        }
+        String errors = builder.toString();
+        if (errors.length() > 0) {
+            throw new ClusterJUserException(local.message("ERR_Ordering_Field_Does_Not_Exist", errors));
+        }
+    }
+
     public Results<E> execute(Object arg0) {
             throw new UnsupportedOperationException(
                     local.message("ERR_NotImplemented"));
@@ -109,7 +141,7 @@ public class QueryImpl<E> implements Que
     }
 
     public List<E> getResultList() {
-        List<E> results = dobj.getResultList(context, skip, limit);
+        List<E> results = dobj.getResultList(context, skip, limit, ordering, orderingFields);
         // create new context, copying the parameters, for another execution
         context = new QueryExecutionContextImpl(context);
         return results;

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/store/ScanOperation.java'
--- a/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/store/ScanOperation.java	2012-05-29 17:15:08 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/store/ScanOperation.java	2012-05-31 15:07:44 +0000
@@ -1,5 +1,5 @@
 /*
-   Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+   Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -17,6 +17,7 @@
 
 package com.mysql.clusterj.core.store;
 
+import com.mysql.clusterj.Query.Ordering;
 import com.mysql.clusterj.core.spi.QueryExecutionContext;
 
 /**
@@ -34,4 +35,6 @@ public interface ScanOperation extends O
 
     public ResultData resultData(boolean execute, long skip, long limit);
 
+    public void setOrdering(Ordering ordering);
+
 }

=== modified file 'storage/ndb/clusterj/clusterj-core/src/main/resources/com/mysql/clusterj/core/Bundle.properties'
--- a/storage/ndb/clusterj/clusterj-core/src/main/resources/com/mysql/clusterj/core/Bundle.properties	2012-05-23 23:51:17 +0000
+++ b/storage/ndb/clusterj/clusterj-core/src/main/resources/com/mysql/clusterj/core/Bundle.properties	2012-05-31 00:47:21 +0000
@@ -140,3 +140,6 @@ is too big; it must contain fewer than 4
 MSG_Removing_Schema:Removing schema {0} after failure to initialize domain type handler for class {1}.
 ERR_Invalid_Limits:The limits {0}, {1} are invalid: the first parameter must be greater than or equal to zero; \
 the second parameter must be greater than or equal to zero; and limits cannot be applied to delete.
+ERR_Cannot_Use_Ordering_With_Table_Scan:There is no index containing the ordering fields.
+ERR_Invalid_Ordering:The ordering specified {0} is not valid.
+ERR_Ordering_Field_Does_Not_Exist:The ordering field(s) {0} do not exist in the domain type.

=== modified file 'storage/ndb/clusterj/clusterj-jdbc/src/main/java/com/mysql/clusterj/jdbc/SQLExecutor.java'
--- a/storage/ndb/clusterj/clusterj-jdbc/src/main/java/com/mysql/clusterj/jdbc/SQLExecutor.java	2012-05-23 23:51:17 +0000
+++ b/storage/ndb/clusterj/clusterj-jdbc/src/main/java/com/mysql/clusterj/jdbc/SQLExecutor.java	2012-05-31 00:47:21 +0000
@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ *  Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -186,8 +186,8 @@ public class SQLExecutor {
             session.startAutoTransaction();
             try {
                 valueHandlerBatching.next();
-                // TODO get skip and limit from the SQL query
-                ResultData resultData = queryDomainType.getResultData(context, 0, Long.MAX_VALUE);
+                // TODO get skip and limit and ordering from the SQL query
+                ResultData resultData = queryDomainType.getResultData(context, 0L, Long.MAX_VALUE, null, null);
                 // session.endAutoTransaction();
                 return new ResultSetInternalMethodsImpl(resultData, columnNumberToFieldNumberMap, 
                         columnNameToFieldNumberMap, session);

=== modified file 'storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/AbstractQueryTest.java'
--- a/storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/AbstractQueryTest.java	2012-05-23 23:51:17 +0000
+++ b/storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/AbstractQueryTest.java	2012-05-31 00:47:21 +0000
@@ -1,5 +1,5 @@
 /*
-   Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
+   Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -25,6 +25,7 @@ import com.mysql.clusterj.query.QueryDom
 import com.mysql.clusterj.query.Predicate;
 import com.mysql.clusterj.query.PredicateOperand;
 
+import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
@@ -64,6 +65,12 @@ abstract public class AbstractQueryTest
     /** The upper limit (number of rows to return) */
     protected Long limit = null;
 
+    /** The ordering for the query */
+    protected Query.Ordering ordering = null;
+
+    /** The ordering fields for the query */
+    protected String[] orderingFields = null;
+
     @Override
     public void localSetUp() {
         setAutotransaction(false);
@@ -92,6 +99,11 @@ abstract public class AbstractQueryTest
         this.limit = limit;
     }
 
+    protected void setOrdering(Query.Ordering ordering, String... orderingFields) {
+        this.ordering = ordering;
+        this.orderingFields = orderingFields;
+    }
+
     class QueryHolder {
         public QueryBuilder builder;
         public QueryDomainType<?> dobj;
@@ -148,6 +160,7 @@ abstract public class AbstractQueryTest
         public Predicate extraGreaterEqualAndLessEqual;
         public Query<?> query;
         public Set<Integer> expectedSet = new HashSet<Integer>();
+        public List<Integer> expectedList = new ArrayList<Integer>();
         public String expectedIndex;
         private Predicate equalOrIn;
         private Predicate extraIn;
@@ -244,6 +257,7 @@ abstract public class AbstractQueryTest
         public void setExpectedResultIds(int... expecteds) {
             for (int expected:expecteds) {
                 expectedSet.add(expected);
+                expectedList.add(expected);
             }
         }
         public void setExtraParameterEqual(Object parameter) {
@@ -269,17 +283,28 @@ abstract public class AbstractQueryTest
                     query.setLimits(0, limit);
                 }
             }
+            if (ordering != null) {
+                query.setOrdering(ordering, orderingFields);
+            }
             Set<Integer> actualSet = new HashSet<Integer>();
+            List<Integer> actualList = new ArrayList<Integer>();
             List<IdBase> resultList = (List<IdBase>) query.getResultList();
             for (IdBase result: resultList) {
                 printResultInstance(result);
                 actualSet.add(result.getId());
+                actualList.add(result.getId());
             }
             errorIfNotEqual("Wrong index used for " + theQuery + " query: ",
                     expectedIndex, query.explain().get("IndexUsed"));
-            errorIfNotEqual("Wrong ids returned from " + theQuery + " query: ",
-                    expectedSet, actualSet);
+            if (ordering != null) {
+                // must check ordering not just values
+                errorIfNotEqual("Wrong ids returned from ordered " + ordering + " " + theQuery + " query: ",
+                        expectedList, actualList);
+            } else {
+                errorIfNotEqual("Wrong ids returned from " + theQuery + " query: ",
+                        expectedSet, actualSet);
             }
+        }
 
         public void checkDeletePersistentAll(String where, int expectedNumberOfDeletedInstances) {
             if (limit != null) {
@@ -372,6 +397,21 @@ abstract public class AbstractQueryTest
     protected void printResultInstance(IdBase instance) {
     }
 
+    public void noWhereQuery(String propertyName, String expectedIndex,
+            Object parameterValue, int... expected) {
+        tx.begin();
+        QueryHolder holder = new QueryHolder(getInstanceType(), propertyName, expectedIndex);
+        // specify no where clause
+        // create the query
+        holder.createQuery(session);
+        // set the parameter value
+        holder.setParameterEqual(parameterValue);
+        // get the results
+        holder.setExpectedResultIds(expected);
+        holder.checkResults(propertyName + " noWhere");
+        tx.commit();
+    }
+
     public void equalQuery(String propertyName, String expectedIndex,
             Object parameterValue, int... expected) {
         tx.begin();

=== added file 'storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/QueryOrderingTest.java'
--- a/storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/QueryOrderingTest.java	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/QueryOrderingTest.java	2012-05-31 00:47:21 +0000
@@ -0,0 +1,249 @@
+/*
+Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+*/
+
+package testsuite.clusterj;
+
+import com.mysql.clusterj.ClusterJUserException;
+import com.mysql.clusterj.Query.Ordering;
+
+import java.util.Arrays;
+
+import testsuite.clusterj.model.LongIntStringIndex;
+
+/** Verify queries using ordering. If a query uses ordering, an index containing
+ * the ordering columns must already be defined in the database.
+ * 
+ * This test is based on AbstractQueryTest.
+ */
+public class QueryOrderingTest extends AbstractQueryTest {
+
+    /*
+drop table if exists longintstringix;
+create table longintstringix (
+ id int(11) not null,
+ longix bigint(20) not null,
+ stringix varchar(10) not null,
+ intix int(11) not null,
+ stringvalue varchar(10) default null,
+ PRIMARY KEY (id),
+ KEY idx_long_int_string (longix, intix, stringix)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1;
+
+      */
+
+    @Override
+    public Class<?> getInstanceType() {
+        return LongIntStringIndex.class;
+    }
+
+    /** Most query tests use the same number of instances (10).
+     * But this test uses 25.
+     */
+    @Override
+    protected int getNumberOfInstances() {
+        return 25;
+    }
+
+    protected int PK_MODULUS = 3;
+    protected long PRETTY_BIG_NUMBER = 1000000000000000L;
+
+    @Override
+    protected boolean getCleanupAfterTest() {
+        return false;
+    }
+
+    public void testNegativeOrderingFieldsTooLong() {
+        try {
+            setOrdering(Ordering.ASCENDING, "longix", "intix", "stringix", "id");
+            greaterEqualQuery("longix", "idx_long_int_string", 0L, 9);
+            fail("Ordering fields too long should fail.");
+        } catch (ClusterJUserException ex) {
+            // good catch
+        }
+        failOnError();
+    }
+
+    public void testNegativeNoIndexMatchesOrderingFields() {
+        try {
+            setOrdering(Ordering.ASCENDING, "longix", "intix", "id");
+            greaterEqualQuery("longix", "idx_long_int_string", 0L, 9);
+            fail("Ordering field not in index should fail.");
+        } catch (ClusterJUserException ex) {
+            // good catch
+        }
+        failOnError();
+    }
+
+    public void testNegativeOrderingFieldsNotInPosition() {
+        try {
+            setOrdering(Ordering.ASCENDING, "longix", "stringix", "intix");
+            greaterEqualQuery("longix", "idx_long_int_string", 0L, 9);
+            fail("Ordering field in wrong position in index should fail.");
+        } catch (ClusterJUserException ex) {
+            // good catch
+        }
+        failOnError();
+    }
+
+    public void testNegativeOrderingFieldsAreNotFields() {
+        try {
+            setOrdering(Ordering.ASCENDING, "poop");
+            greaterEqualQuery("longix", "idx_long_int_string", 0L, 9);
+            fail("Ordering field not a field should fail.");
+        } catch (ClusterJUserException ex) {
+            // good catch
+            String message = ex.getMessage();
+            errorIfNotEqual("Error message '" + message + "' does not contain the name of the failing field 'poop'.",
+                    true, message.contains("poop"));
+        }
+        failOnError();
+    }
+
+    public void testNegativeMultipleOrderingFieldsAreNotFields() {
+        try {
+            setOrdering(Ordering.ASCENDING, "dupe", "poop");
+            greaterEqualQuery("longix", "idx_long_int_string", 0L, 9);
+            fail("Ordering field not a field should fail.");
+        } catch (ClusterJUserException ex) {
+            // good catch
+            String message = ex.getMessage();
+            errorIfNotEqual("Error message '" + message + "' does not contain the name of the failing fields 'poop' and 'dupe'.",
+                    true, message.contains("poop") && message.contains("dupe"));
+        }
+        failOnError();
+    }
+
+    public void testNoWhereAscending() {
+        System.out.println("QueryOrderingTest.testNoWhereAscending");
+        setOrdering(Ordering.ASCENDING, "id");
+        noWhereQuery("id", "PRIMARY", null, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24);
+        failOnError();
+    }
+
+    public void testNoWhereDescending() {
+        System.out.println("QueryOrderingTest.testNoWhereDescending");
+        setOrdering(Ordering.DESCENDING, "id");
+        noWhereQuery("id", "PRIMARY", null, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+        failOnError();
+    }
+
+    public void testPrimaryEqualAscending() {
+        setOrdering(Ordering.ASCENDING, "longix", "intix", "stringix");
+        equalQuery("id", "PRIMARY", 1, 1);
+        failOnError();
+    }
+
+    public void testGreaterEqualAscending() {
+        setOrdering(Ordering.ASCENDING, "longix", "intix", "stringix");
+        greaterEqualQuery("longix", "idx_long_int_string", 2000000000000000L, 18, 19, 20, 21, 22, 23, 24);
+        failOnError();
+    }
+
+    public void testGreaterEqualAscendingPartial() {
+        setOrdering(Ordering.ASCENDING, "longix", "intix");
+        greaterEqualQuery("longix", "idx_long_int_string", 2000000000000000L, 18, 19, 20, 21, 22, 23, 24);
+        failOnError();
+    }
+
+    public void testInAndBetweenAscending() {
+        setOrdering(Ordering.ASCENDING, "longix", "intix");
+        inAndBetweenQuery("longix", new Object[] {1000000000000000L, 0L}, "intix", 1, 2, "idx_long_int_string", 12, 13, 14, 15, 16, 17, 3, 4, 5, 6, 7, 8);
+        inAndBetweenQuery("longix", Arrays.asList(new Object[] {1000000000000000L, 0L}), "stringix", "1", "4", "idx_long_int_string", 10, 11, 13, 14, 16, 17, 1, 2, 4, 5, 7, 8);
+        failOnError();        
+    }
+
+    public void testInAndBetweenDescending() {
+        setOrdering(Ordering.DESCENDING, "longix", "intix", "stringix");
+        inAndBetweenQuery("longix", new Object[] {1000000000000000L, 0L}, "intix", 1, 2, "idx_long_int_string", 17, 16, 15, 14, 13, 12, 8, 7, 6, 5, 4, 3);
+        inAndBetweenQuery("longix", Arrays.asList(new Object[] {1000000000000000L, 0L}), "stringix", "1", "4", "idx_long_int_string", 17, 16, 14, 13, 11, 10, 8, 7, 5, 4, 2, 1);
+        failOnError();        
+    }
+
+    public void testBetweenAndInAscending() {
+        setOrdering(Ordering.ASCENDING, "longix", "intix");
+        betweenAndInQuery("longix", 0L, 1000000000000000L, "intix", new Object[] {2, 0}, "idx_long_int_string", 0, 1, 2, 6, 7, 8, 9, 10, 11, 15, 16, 17);
+        betweenAndInQuery("longix", 1000000000000000L, 2000000000000000L, "intix", Arrays.asList(new Object[] {2, 1}), "idx_long_int_string", 12, 13, 14, 15, 16, 17, 21, 22, 23, 24);
+        failOnError();        
+    }
+
+    public void testBetweenAndInDescending() {
+        setOrdering(Ordering.DESCENDING, "longix", "intix", "stringix");
+        betweenAndInQuery("longix", 0L, 1000000000000000L, "intix", new Object[] {2, 0}, "idx_long_int_string", 17, 16, 15, 11, 10, 9, 8, 7, 6, 2, 1, 0);
+        betweenAndInQuery("longix", 1000000000000000L, 2000000000000000L, "intix", Arrays.asList(new Object[] {2, 1}), "idx_long_int_string", 24, 23, 22, 21, 17, 16, 15, 14, 13, 12);
+        failOnError();        
+    }
+
+    /** The strategy for creating instances is to derive the three key
+     * components from the instance number, such that the instance number is:
+     * pk1 * PK_MODULUS^2 + pk2 * PK_MODULUS + pk3
+     * 
+     */
+    protected void createInstances(int number) {
+        for (int i = 0; i < number; ++i) {
+            LongIntStringIndex instance = createInstance(i);
+            //System.out.println(toString(instance));
+            instances.add(instance);
+        }
+    }
+
+    /** Create an instance of LongIntStringIndex.
+     * @param index the index to use to generate data
+     * @return the instance
+     */
+    protected LongIntStringIndex createInstance(int index) {
+        LongIntStringIndex instance = session.newInstance(LongIntStringIndex.class);
+        instance.setId(index);
+        instance.setLongix(getPK1(index));
+        instance.setIntix(getPK2(index));
+        instance.setStringix(getPK3(index));
+        instance.setStringvalue(getValue(index));
+        return instance;
+    }
+
+    protected long getPK1(int index) {
+        return PRETTY_BIG_NUMBER * ((index / PK_MODULUS / PK_MODULUS) % PK_MODULUS);
+    }
+
+    protected int getPK2(int index) {
+        return ((index / PK_MODULUS) % PK_MODULUS);
+    }
+
+    protected String getPK3(int index) {
+        return "" + (index % PK_MODULUS);
+    }
+
+    protected String getValue(int index) {
+        return "Value " + index;
+    }
+
+    protected String toString(LongIntStringIndex instance) {
+        StringBuffer result = new StringBuffer();
+        result.append("LongIntStringIndex[");
+        result.append(instance.getId());
+        result.append("]: ");
+        result.append(instance.getLongix());
+        result.append(", ");
+        result.append(instance.getIntix());
+        result.append(", \"");
+        result.append(instance.getStringix());
+        result.append("\", \"");
+        result.append(instance.getStringvalue());
+        result.append("\".");
+        return result.toString();
+    }
+
+}

=== modified file 'storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordIndexScanOperationImpl.java'
--- a/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordIndexScanOperationImpl.java	2012-05-17 00:19:25 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordIndexScanOperationImpl.java	2012-05-31 00:47:21 +0000
@@ -137,10 +137,12 @@ public class NdbRecordIndexScanOperation
                 handleError(returnCode, ndbIndexScanOperation);
             }
         } else {
-            // only one range defined
+            // zero or one range defined
             ndbIndexBound = getNdbIndexBound();
-            int returnCode = ndbIndexScanOperation.setBound(ndbRecordKeys.getNdbRecord(), ndbIndexBound);
-            handleError(returnCode, ndbIndexScanOperation);
+            if (ndbIndexBound != null) {
+                int returnCode = ndbIndexScanOperation.setBound(ndbRecordKeys.getNdbRecord(), ndbIndexBound);
+                handleError(returnCode, ndbIndexScanOperation);
+            }
         }
         clusterTransaction.postExecuteCallback(new Runnable() {
             // free structures used to define operation            

=== modified file 'storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordScanOperationImpl.java'
--- a/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordScanOperationImpl.java	2012-05-24 13:50:07 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/NdbRecordScanOperationImpl.java	2012-05-31 00:47:21 +0000
@@ -17,12 +17,15 @@
 
 package com.mysql.clusterj.tie;
 
+import com.mysql.clusterj.ClusterJFatalInternalException;
 import com.mysql.clusterj.core.spi.QueryExecutionContext;
 import com.mysql.clusterj.core.store.ResultData;
 import com.mysql.clusterj.core.store.ScanFilter;
 import com.mysql.clusterj.core.store.ScanOperation;
 import com.mysql.clusterj.core.store.Table;
 
+import com.mysql.clusterj.Query.Ordering;
+
 import com.mysql.ndbjtie.ndbapi.NdbInterpretedCode;
 import com.mysql.ndbjtie.ndbapi.NdbOperationConst;
 import com.mysql.ndbjtie.ndbapi.NdbScanFilter;
@@ -52,6 +55,9 @@ public abstract class NdbRecordScanOpera
     /** The lock mode for this operation */
     int lockMode;
 
+    /** The ordering for this operation */
+    Ordering ordering = null;
+
     public NdbRecordScanOperationImpl(ClusterTransactionImpl clusterTransaction, Table storeTable,
             int lockMode) {
         super(clusterTransaction, storeTable);
@@ -117,16 +123,31 @@ public abstract class NdbRecordScanOpera
     /** Create scan options for this scan. 
      * Scan options are used to set a filter into the NdbScanOperation,
      * set the key info flag if using a lock mode that requires lock takeover, and set the multi range flag.
-     * always set SF_OrderBy to get ordered scans.
+     * set either SF_OrderBy or SF_Descending to get ordered scans.
      */
     protected void getScanOptions() {
-        long options = (long)Type.SO_SCANFLAGS;
-        int flags = ScanFlag.SF_OrderBy;
+        long options = 0;
+        int flags = 0;
         scanOptions = db.createScanOptions();
+        if (ordering != null) {
+            options |= Type.SO_SCANFLAGS;
+            switch (ordering) {
+                case ASCENDING:
+                    flags = ScanFlag.SF_OrderBy;
+                    break;
+                case DESCENDING:
+                    flags = ScanFlag.SF_Descending;
+                    break;
+                default:
+                    throw new ClusterJFatalInternalException(local.message("ERR_Invalid_Ordering", ordering));
+            }
+        }
         if (multiRange) {
+            options |= Type.SO_SCANFLAGS;
             flags |= ScanFlag.SF_MultiRange;
         }
         if (lockMode != com.mysql.ndbjtie.ndbapi.NdbOperationConst.LockMode.LM_CommittedRead) {
+            options |= Type.SO_SCANFLAGS;
             flags |= ScanFlag.SF_KeyInfo;
         }
         if (ndbScanFilter != null) {
@@ -225,4 +246,8 @@ public abstract class NdbRecordScanOpera
         return result;
     }
 
+    public void setOrdering(Ordering ordering) {
+        this.ordering = ordering;
+    }
+
 }

=== modified file 'storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/ScanOperationImpl.java'
--- a/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/ScanOperationImpl.java	2012-05-29 17:15:08 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/ScanOperationImpl.java	2012-05-31 15:07:44 +0000
@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+ *  Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -22,6 +22,7 @@ import com.mysql.ndbjtie.ndbapi.NdbOpera
 import com.mysql.ndbjtie.ndbapi.NdbScanFilter;
 import com.mysql.ndbjtie.ndbapi.NdbScanOperation;
 
+import com.mysql.clusterj.Query.Ordering;
 import com.mysql.clusterj.core.spi.QueryExecutionContext;
 import com.mysql.clusterj.core.store.ResultData;
 import com.mysql.clusterj.core.store.ScanFilter;
@@ -35,6 +36,8 @@ class ScanOperationImpl extends Operatio
 
     private NdbScanOperation ndbScanOperation;
 
+    private Ordering ordering = null;
+
     ScanOperationImpl(Table storeTable, NdbScanOperation operation,
             ClusterTransactionImpl clusterTransaction) {
         super(storeTable, operation, clusterTransaction);
@@ -93,4 +96,8 @@ class ScanOperationImpl extends Operatio
         }
     }
 
+    public void setOrdering(Ordering ordering) {
+        this.ordering = ordering;
+    }
+
 }

=== added file 'storage/ndb/clusterj/clusterj-tie/src/test/java/testsuite/clusterj/tie/QueryOrderingTest.java'
--- a/storage/ndb/clusterj/clusterj-tie/src/test/java/testsuite/clusterj/tie/QueryOrderingTest.java	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/clusterj/clusterj-tie/src/test/java/testsuite/clusterj/tie/QueryOrderingTest.java	2012-05-31 00:47:21 +0000
@@ -0,0 +1,22 @@
+/*
+   Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+ */
+
+package testsuite.clusterj.tie;
+
+public class QueryOrderingTest extends testsuite.clusterj.QueryOrderingTest {
+
+}

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp	2012-05-21 23:05:17 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp	2012-05-31 15:07:44 +0000
@@ -2632,9 +2632,9 @@ private:
   bool checkTransporterOverloaded(Signal* signal,
                                   const NodeBitmask& all,
                                   const class LqhKeyReq* req);
-  void noFreeRecordLab(Signal* signal, 
-		       const class LqhKeyReq * lqhKeyReq, 
-		       Uint32 errorCode);
+  void earlyKeyReqAbort(Signal* signal, 
+                        const class LqhKeyReq * lqhKeyReq, 
+                        Uint32 errorCode);
   void logLqhkeyrefLab(Signal* signal);
   void closeCopyLab(Signal* signal);
   void commitReplyLab(Signal* signal);

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2012-05-29 17:15:08 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2012-05-31 15:07:44 +0000
@@ -2973,22 +2973,53 @@ void Dblqh::execTIME_SIGNAL(Signal* sign
 /* INVOLVE COMMUNICATION WITH ACC AND TUP.                                   */
 /* ######################################################################### */
 
-void Dblqh::noFreeRecordLab(Signal* signal, 
-			    const LqhKeyReq * lqhKeyReq,
-			    Uint32 errCode) 
+/**
+ * earlyKeyReqAbort
+ *
+ * Exit early from handling an LQHKEYREQ request.
+ * The method determines which resources (if any) need to be freed,
+ * then signals the requestor with an error response.
+ * NOTE: when adding new callers, verify that all required resources are freed.
+ */
+void Dblqh::earlyKeyReqAbort(Signal* signal, 
+                             const LqhKeyReq * lqhKeyReq,
+                             Uint32 errCode) 
 {
   jamEntry();
   const Uint32 transid1  = lqhKeyReq->transId1;
   const Uint32 transid2  = lqhKeyReq->transId2;
   const Uint32 reqInfo   = lqhKeyReq->requestInfo;
   
-  if(errCode == ZNO_FREE_MARKER_RECORDS_ERROR ||
-     errCode == ZNODE_SHUTDOWN_IN_PROGESS ||
-     errCode == ZNODE_FAILURE_ERROR){
+  bool tcConnectRecAllocated = (tcConnectptr.i != RNIL);
+
+  if (tcConnectRecAllocated)
+  {
     jam();
+    
+    /* Could have a commit-ack marker allocated. */
+    remove_commit_marker(tcConnectptr.p);
+    
+    /* Could have long key/attr sections linked */
+    ndbrequire(tcConnectptr.p->m_dealloc == 0);
+    releaseOprec(signal);
+    
+    
+    /* 
+     * Free the TcConnectRecord, ensuring that the
+     * table reference counts have not been incremented and
+     * so will not be decremented.
+     * Also verify that we're not present in the transid 
+     * hash
+     */
+    ndbrequire(tcConnectptr.p->tableref == RNIL);
+    /* The following is not a 100% check, but a reasonable guard */
+    ndbrequire(tcConnectptr.p->nextHashRec == RNIL);
+    ndbrequire(tcConnectptr.p->prevHashRec == RNIL);
     releaseTcrec(signal, tcConnectptr);
   }
 
+  /* Now perform signalling */
+
   if (LqhKeyReq::getDirtyFlag(reqInfo) && 
       LqhKeyReq::getOperation(reqInfo) == ZREAD &&
       !LqhKeyReq::getNormalProtocolFlag(reqInfo)){
@@ -3030,7 +3061,7 @@ void Dblqh::noFreeRecordLab(Signal* sign
 	       LqhKeyRef::SignalLength, JBB);
   }//if
   return;
-}//Dblqh::noFreeRecordLab()
+}//Dblqh::earlyKeyReqAbort()
 
 Uint32
 Dblqh::get_table_state_error(Ptr<Tablerec> tabPtr) const
@@ -4349,10 +4380,11 @@ void Dblqh::execSIGNAL_DROPPED_REP(Signa
      * We will notify the client that their LQHKEYREQ
      * failed
      */
+    tcConnectptr.i = RNIL;
     const LqhKeyReq * const truncatedLqhKeyReq = 
       (LqhKeyReq *) &rep->originalData[0];
     
-    noFreeRecordLab(signal, truncatedLqhKeyReq, ZGET_DATAREC_ERROR);
+    earlyKeyReqAbort(signal, truncatedLqhKeyReq, ZGET_DATAREC_ERROR);
 
     break;
   }
@@ -4407,6 +4439,7 @@ void Dblqh::execLQHKEYREQ(Signal* signal
 
   const LqhKeyReq * const lqhKeyReq = (LqhKeyReq *)signal->getDataPtr();
   SectionHandle handle(this, signal);
+  tcConnectptr.i = RNIL;
 
   {
     const NodeBitmask& all = globalTransporterRegistry.get_status_overloaded();
@@ -4419,7 +4452,7 @@ void Dblqh::execLQHKEYREQ(Signal* signal
          */
         jam();
         releaseSections(handle);
-        noFreeRecordLab(signal, lqhKeyReq, ZTRANSPORTER_OVERLOADED_ERROR);
+        earlyKeyReqAbort(signal, lqhKeyReq, ZTRANSPORTER_OVERLOADED_ERROR);
         return;
       }
     }
@@ -4429,7 +4462,7 @@ void Dblqh::execLQHKEYREQ(Signal* signal
   {
     jam();
     releaseSections(handle);
-    noFreeRecordLab(signal, lqhKeyReq, ZTRANSPORTER_OVERLOADED_ERROR);
+    earlyKeyReqAbort(signal, lqhKeyReq, ZTRANSPORTER_OVERLOADED_ERROR);
     return;
   }
 
@@ -4442,7 +4475,7 @@ void Dblqh::execLQHKEYREQ(Signal* signal
 /* NO FREE TC RECORD AVAILABLE, THUS WE CANNOT HANDLE THE REQUEST.           */
 /* ------------------------------------------------------------------------- */
     releaseSections(handle);
-    noFreeRecordLab(signal, lqhKeyReq, ZNO_TC_CONNECT_ERROR);
+    earlyKeyReqAbort(signal, lqhKeyReq, ZNO_TC_CONNECT_ERROR);
     return;
   }//if
 
@@ -4488,14 +4521,14 @@ void Dblqh::execLQHKEYREQ(Signal* signal
   const Uint8 op = LqhKeyReq::getOperation(Treqinfo);
   if ((op == ZREAD || op == ZREAD_EX) && !getAllowRead()){
     releaseSections(handle);
-    noFreeRecordLab(signal, lqhKeyReq, ZNODE_SHUTDOWN_IN_PROGESS);
+    earlyKeyReqAbort(signal, lqhKeyReq, ZNODE_SHUTDOWN_IN_PROGESS);
     return;
   }
 
   if (unlikely(get_node_status(refToNode(sig5)) != ZNODE_UP))
   {
     releaseSections(handle);
-    noFreeRecordLab(signal, lqhKeyReq, ZNODE_FAILURE_ERROR);
+    earlyKeyReqAbort(signal, lqhKeyReq, ZNODE_FAILURE_ERROR);
     return;
   }
   
@@ -4553,7 +4586,7 @@ void Dblqh::execLQHKEYREQ(Signal* signal
       if (markerPtr.i == RNIL)
       {
         releaseSections(handle);
-        noFreeRecordLab(signal, lqhKeyReq, ZNO_FREE_MARKER_RECORDS_ERROR);
+        earlyKeyReqAbort(signal, lqhKeyReq, ZNO_FREE_MARKER_RECORDS_ERROR);
         return;
       }
       markerPtr.p->transid1 = sig1;
@@ -4708,8 +4741,7 @@ void Dblqh::execLQHKEYREQ(Signal* signal
     if (unlikely(!ok))
     {
       jam();
-      terrorCode= ZGET_DATAREC_ERROR;
-      abortErrorLab(signal);
+      earlyKeyReqAbort(signal, lqhKeyReq, ZGET_DATAREC_ERROR);
       return;
     }
 
@@ -4725,8 +4757,9 @@ void Dblqh::execLQHKEYREQ(Signal* signal
     {
       jam();
       ndbassert(! LqhKeyReq::getNrCopyFlag(Treqinfo));
-      terrorCode = ZNO_TUPLE_FOUND;
-      abortErrorLab(signal);
+      
+      /* Reply with NO_TUPLE_FOUND */
+      earlyKeyReqAbort(signal, lqhKeyReq, ZNO_TUPLE_FOUND);
       return;
     }
 
@@ -4784,6 +4817,9 @@ void Dblqh::execLQHKEYREQ(Signal* signal
       return;
     }//if
   }//if
+  /* Check that no equal element exists */
+  ndbassert(findTransaction(regTcPtr->transid[0], regTcPtr->transid[1], 
+                            regTcPtr->tcOprec, 0) == ZNOT_FOUND);
   TcConnectionrecPtr localNextTcConnectptr;
   Uint32 hashIndex = (regTcPtr->transid[0] ^ regTcPtr->tcOprec) & 1023;
   localNextTcConnectptr.i = ctransidHash[hashIndex];
@@ -4797,6 +4833,7 @@ void Dblqh::execLQHKEYREQ(Signal* signal
     ptrCheckGuard(localNextTcConnectptr, 
                   ctcConnectrecFileSize, tcConnectionrec);
     jam();
+    ndbassert(localNextTcConnectptr.p->prevHashRec == RNIL);
     localNextTcConnectptr.p->prevHashRec = tcConnectptr.i;
   }//if
   if (tabptr.i >= ctabrecFileSize) {
@@ -7244,9 +7281,12 @@ void Dblqh::deleteTransidHash(Signal* si
 
   prevHashptr.i = regTcPtr->prevHashRec;
   nextHashptr.i = regTcPtr->nextHashRec;
+  /* prevHashptr and nextHashptr may be RNIL when the bucket has 1 element */
+
   if (prevHashptr.i != RNIL) {
     jam();
     ptrCheckGuard(prevHashptr, ctcConnectrecFileSize, tcConnectionrec);
+    ndbassert(prevHashptr.p->nextHashRec == tcConnectptr.i);
     prevHashptr.p->nextHashRec = nextHashptr.i;
   } else {
     jam();
@@ -7255,11 +7295,13 @@ void Dblqh::deleteTransidHash(Signal* si
 /* A NEW LEADER OF THE LIST.                                                 */
 /* ------------------------------------------------------------------------- */
     Uint32 hashIndex = (regTcPtr->transid[0] ^ regTcPtr->tcOprec) & 1023;
+    ndbassert(ctransidHash[hashIndex] == tcConnectptr.i);
     ctransidHash[hashIndex] = nextHashptr.i;
   }//if
   if (nextHashptr.i != RNIL) {
     jam();
     ptrCheckGuard(nextHashptr, ctcConnectrecFileSize, tcConnectionrec);
+    ndbassert(nextHashptr.p->prevHashRec == tcConnectptr.i);
     nextHashptr.p->prevHashRec = prevHashptr.i;
   }//if
 
@@ -10456,6 +10498,11 @@ void Dblqh::execSCAN_FRAGREQ(Signal* sig
   }//if
   cbookedAccOps += max_rows;
 
+  /* Check that no equal element already exists */
+  ndbassert(findTransaction(tcConnectptr.p->transid[0],
+                            tcConnectptr.p->transid[1],
+                            tcConnectptr.p->tcOprec,
+                            senderHi) == ZNOT_FOUND);
   hashIndex = (tcConnectptr.p->transid[0] ^ tcConnectptr.p->tcOprec) & 1023;
   nextHashptr.i = ctransidHash[hashIndex];
   ctransidHash[hashIndex] = tcConnectptr.i;
@@ -10468,6 +10515,7 @@ void Dblqh::execSCAN_FRAGREQ(Signal* sig
      *   IF IT EXISTS
      * --------------------------------------------------------------------- */
     ptrCheckGuard(nextHashptr, ctcConnectrecFileSize, tcConnectionrec);
+    ndbassert(nextHashptr.p->prevHashRec == RNIL);
     nextHashptr.p->prevHashRec = tcConnectptr.i;
   }//if
   if ((! isLongReq ) && 
@@ -20808,6 +20856,8 @@ void Dblqh::initialiseTcrec(Signal* sign
       tcConnectptr.p->attrInfoIVal = RNIL;
       tcConnectptr.p->m_flags= 0;
       tcConnectptr.p->tcTimer = 0;
+      tcConnectptr.p->nextHashRec = RNIL;
+      tcConnectptr.p->prevHashRec = RNIL;
       tcConnectptr.p->nextTcConnectrec = tcConnectptr.i + 1;
     }//for
     tcConnectptr.i = ctcConnectrecFileSize - 1;
@@ -23015,10 +23065,12 @@ Dblqh::execDUMP_STATE_ORD(Signal* signal
   Uint32 ttcConnectrecFileSize = ctcConnectrecFileSize;
   if(arg == 2306)
   {
+    Uint32 bucketLen[1024];
     for(Uint32 i = 0; i<1024; i++)
     {
       TcConnectionrecPtr tcRec;
       tcRec.i = ctransidHash[i];
+      bucketLen[i] = 0;
       while(tcRec.i != RNIL)
       {
 	ptrCheckGuard(tcRec, ttcConnectrecFileSize, regTcConnectionrec);
@@ -23027,8 +23079,18 @@ Dblqh::execDUMP_STATE_ORD(Signal* signal
 	signal->theData[1] = tcRec.i;
 	execDUMP_STATE_ORD(signal);
 	tcRec.i = tcRec.p->nextHashRec;
+        bucketLen[i]++;
+      }
+    }
+    ndbout << "LQH transid hash bucket lengths : " << endl;
+    for (Uint32 i = 0; i < 1024; i++)
+    {
+      if (bucketLen[i] > 0)
+      {
+        ndbout << " bucket " << i << " len " << bucketLen[i] << endl;
       }
     }
+    ndbout << "Done." << endl;
   }
 
   if(arg == 2307 || arg == 2308)

No bundle (reason: useless for push emails).
Thread
bzr push into mysql-5.5-cluster-7.2 branch (frazer.clement:3927 to 3929)Bug#14083116Frazer Clement31 May