MySQL Lists are EOL. Please join:

List: Commits  « Previous Message | Next Message »
From: pekka  Date: May 2 2006, 12:34pm
Subject:bk commit into 4.1 tree (pekka:1.2475) BUG#19201
View as plain text  
Below is the list of changes that have just been committed into a local
4.1 repository of pekka. When pekka does a push these changes will
be propagated to the main repository and, within 24 hours after the
push, to the public repository.
For information on how to access the public repository
see http://dev.mysql.com/doc/mysql/en/installing-source-tree.html

ChangeSet
  1.2475 06/05/02 14:33:55 pekka@stripped +7 -0
  ndb - bug#19201 (4.1), see comment in NdbBlob.cpp

  ndb/src/ndbapi/NdbBlob.cpp
    1.21 06/05/02 14:31:23 pekka@stripped +23 -1
    bug#19201

  ndb/include/ndbapi/NdbBlob.hpp
    1.11 06/05/02 14:31:23 pekka@stripped +1 -0
    bug#19201

  ndb/include/kernel/signaldata/TcKeyReq.hpp
    1.5 06/05/02 14:31:23 pekka@stripped +1 -0
    bug#19201

  mysql-test/t/ndb_blob.test
    1.16 06/05/02 14:31:23 pekka@stripped +22 -3
    bug#19201

  mysql-test/r/ndb_blob.result
    1.16 06/05/02 14:31:23 pekka@stripped +13 -5
    bug#19201

  ndb/tools/delete_all.cpp
    1.13 06/05/02 14:29:24 pekka@stripped +14 -17
    option --transactional (default remains truncate)

  ndb/test/ndbapi/testBlobs.cpp
    1.25 06/05/02 14:28:32 pekka@stripped +9 -14
    option -fac (fetch across commit)

# This is a BitKeeper patch.  What follows are the unified diffs for the
# set of deltas contained in the patch.  The rest of the patch, the part
# that BitKeeper cares about, is below these diffs.
# User:	pekka
# Host:	orca.ndb.mysql.com
# Root:	/space/pekka/ndb/version/my41

--- 1.4/ndb/include/kernel/signaldata/TcKeyReq.hpp	2004-08-18 09:44:19 +02:00
+++ 1.5/ndb/include/kernel/signaldata/TcKeyReq.hpp	2006-05-02 14:31:23 +02:00
@@ -39,6 +39,7 @@
   friend class NdbOperation; 
   friend class NdbIndexOperation;
   friend class NdbScanOperation;
+  friend class NdbBlob;
   friend class DbUtil;
 
   /**

--- 1.24/ndb/test/ndbapi/testBlobs.cpp	2006-02-05 22:06:16 +01:00
+++ 1.25/ndb/test/ndbapi/testBlobs.cpp	2006-05-02 14:28:32 +02:00
@@ -45,6 +45,7 @@
   bool m_dbg;
   bool m_dbgall;
   const char* m_dbug;
+  bool m_fac;
   bool m_full;
   unsigned m_loop;
   unsigned m_parts;
@@ -73,6 +74,7 @@
     m_dbg(false),
     m_dbgall(false),
     m_dbug(0),
+    m_fac(false),
     m_full(false),
     m_loop(1),
     m_parts(10),
@@ -111,6 +113,7 @@
     << "  -dbg        print debug" << endl
     << "  -dbgall     print also NDB API debug (if compiled in)" << endl
     << "  -dbug opt   dbug options" << endl
+    << "  -fac        fetch across commit in scan delete [" << d.m_fac << "]" << endl
     << "  -full       read/write only full blob values" << endl
     << "  -loop N     loop N times 0=forever [" << d.m_loop << "]" << endl
     << "  -parts N    max parts in blob value [" << d.m_parts << "]" << endl
@@ -1260,23 +1263,11 @@
       CHK((ret = rs->nextResult(false)) == 0 || ret == 1 || ret == 2);
       if (++n == g_opt.m_batch || ret == 2) {
         DBG("execute batch: n=" << n << " ret=" << ret);
-        switch (0) {
-        case 0: // works normally
+        if (! g_opt.m_fac) {
           CHK(g_con->execute(NoCommit) == 0);
-          CHK(true || g_con->restart() == 0);
-          break;
-        case 1: // nonsense - g_con is invalid for 2nd batch
-          CHK(g_con->execute(Commit) == 0);
-          CHK(true || g_con->restart() == 0);
-          break;
-        case 2: // DBTC sendSignalErrorRefuseLab
-          CHK(g_con->execute(NoCommit) == 0);
-          CHK(g_con->restart() == 0);
-          break;
-        case 3: // 266 time-out
+        } else {
           CHK(g_con->execute(Commit) == 0);
           CHK(g_con->restart() == 0);
-          break;
         }
         n = 0;
       }
@@ -1823,6 +1814,10 @@
         g_opt.m_dbug = strdup(argv[0]);
 	continue;
       }
+    }
+    if (strcmp(arg, "-fac") == 0) {
+      g_opt.m_fac = true;
+      continue;
     }
     if (strcmp(arg, "-full") == 0) {
       g_opt.m_full = true;

--- 1.12/ndb/tools/delete_all.cpp	2006-02-05 22:10:18 +01:00
+++ 1.13/ndb/tools/delete_all.cpp	2006-05-02 14:29:24 +02:00
@@ -23,17 +23,21 @@
 #include <NDBT.hpp>
 
 static int clear_table(Ndb* pNdb, const NdbDictionary::Table* pTab,
-                       bool commit_across_open_cursor, int parallelism=240);
+                       bool fetch_across_commit, int parallelism=240);
 
 NDB_STD_OPTS_VARS;
 
 static const char* _dbname = "TEST_DB";
+static my_bool _transactional = false;
 static struct my_option my_long_options[] =
 {
   NDB_STD_OPTS("ndb_desc"),
   { "database", 'd', "Name of database table is in",
     (gptr*) &_dbname, (gptr*) &_dbname, 0,
     GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
+  { "transactional", 't', "Single transaction (may run out of operations)",
+    (gptr*) &_transactional, (gptr*) &_transactional, 0,
+    GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
   { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
 };
 static void usage()
@@ -82,18 +86,11 @@
       ndbout << " Table " << argv[i] << " does not exist!" << endl;
       return NDBT_ProgramExit(NDBT_WRONGARGS);
     }
-    // Check if we have any blobs
-    bool commit_across_open_cursor = true;
-    for (int j = 0; j < pTab->getNoOfColumns(); j++) {
-      NdbDictionary::Column::Type t = pTab->getColumn(j)->getType();
-      if (t == NdbDictionary::Column::Blob ||
-          t == NdbDictionary::Column::Text) {
-        commit_across_open_cursor = false;
-        break;
-      }
-    }
-    ndbout << "Deleting all from " << argv[i] << "...";
-    if(clear_table(&MyNdb, pTab, commit_across_open_cursor) == NDBT_FAILED){
+    ndbout << "Deleting all from " << argv[i];
+    if (! _transactional)
+      ndbout << " (non-transactional)";
+    ndbout << " ...";
+    if(clear_table(&MyNdb, pTab, ! _transactional) == NDBT_FAILED){
       res = NDBT_FAILED;
       ndbout << "FAILED" << endl;
     }
@@ -103,7 +100,7 @@
 
 
 int clear_table(Ndb* pNdb, const NdbDictionary::Table* pTab,
-                bool commit_across_open_cursor, int parallelism)
+                bool fetch_across_commit, int parallelism)
 {
   // Scan all records exclusive and delete 
   // them one by one
@@ -134,7 +131,7 @@
       }
       goto failed;
     }
-    
+
     pOp = pTrans->getNdbScanOperation(pTab->getName());	
     if (pOp == NULL) {
       goto failed;
@@ -165,7 +162,7 @@
       } while((check = rs->nextResult(false)) == 0);
       
       if(check != -1){
-        if (commit_across_open_cursor) {
+        if (fetch_across_commit) {
           check = pTrans->execute(Commit);   
           pTrans->restart(); // new tx id
         } else {
@@ -196,7 +193,7 @@
       }
       goto failed;
     }
-    if (! commit_across_open_cursor && pTrans->execute(Commit) != 0) {
+    if (! fetch_across_commit && pTrans->execute(Commit) != 0) {
       err = pTrans->getNdbError();
       goto failed;
     }

--- 1.15/mysql-test/r/ndb_blob.result	2006-02-03 15:22:46 +01:00
+++ 1.16/mysql-test/r/ndb_blob.result	2006-05-02 14:31:23 +02:00
@@ -481,14 +481,22 @@
 insert into t1 (msg) values(
 'Tries to validate (8 byte length + inline bytes) as UTF8 :(
 Fast fix: removed validation for Text.  It is not yet indexable
-so bad data will not crash kernel.
-Proper fix: Set inline bytes to multiple of mbmaxlen and
-validate it (after the 8 byte length).');
+so bad data will not crash kernel.');
 select * from t1;
 id	msg
 1	Tries to validate (8 byte length + inline bytes) as UTF8 :(
 Fast fix: removed validation for Text.  It is not yet indexable
 so bad data will not crash kernel.
-Proper fix: Set inline bytes to multiple of mbmaxlen and
-validate it (after the 8 byte length).
+drop table t1;
+create table t1 (
+a int primary key not null auto_increment,
+b text
+) engine=ndbcluster;
+select count(*) from t1;
+count(*)
+500
+truncate t1;
+select count(*) from t1;
+count(*)
+0
 drop table t1;

--- 1.15/mysql-test/t/ndb_blob.test	2006-02-03 15:22:46 +01:00
+++ 1.16/mysql-test/t/ndb_blob.test	2006-05-02 14:31:23 +02:00
@@ -403,10 +403,29 @@
 insert into t1 (msg) values(
 'Tries to validate (8 byte length + inline bytes) as UTF8 :(
 Fast fix: removed validation for Text.  It is not yet indexable
-so bad data will not crash kernel.
-Proper fix: Set inline bytes to multiple of mbmaxlen and
-validate it (after the 8 byte length).');
+so bad data will not crash kernel.');
 select * from t1;
+drop table t1;
+
+# -- bug #19201
+create table t1 (
+  a int primary key not null auto_increment,
+  b text
+) engine=ndbcluster;
+--disable_query_log
+set autocommit=1;
+# more rows than batch size (64)
+# for this bug no blob parts would be necessary
+let $1 = 500;
+while ($1)
+{
+  insert into t1 (b) values (repeat('x',4000));
+  dec $1;
+}
+--enable_query_log
+select count(*) from t1;
+truncate t1;
+select count(*) from t1;
 drop table t1;
 
 # End of 4.1 tests

--- 1.10/ndb/include/ndbapi/NdbBlob.hpp	2005-09-16 11:26:30 +02:00
+++ 1.11/ndb/include/ndbapi/NdbBlob.hpp	2006-05-02 14:31:23 +02:00
@@ -275,6 +275,7 @@
   bool isWriteOp();
   bool isDeleteOp();
   bool isScanOp();
+  bool isTakeOverOp();
   // computations
   Uint32 getPartNumber(Uint64 pos);
   Uint32 getPartCount();

--- 1.20/ndb/src/ndbapi/NdbBlob.cpp	2006-02-03 15:22:46 +01:00
+++ 1.21/ndb/src/ndbapi/NdbBlob.cpp	2006-05-02 14:31:23 +02:00
@@ -23,6 +23,7 @@
 #include <NdbBlob.hpp>
 #include "NdbBlobImpl.hpp"
 #include <NdbScanOperation.hpp>
+#include <signaldata/TcKeyReq.hpp>
 
 #ifdef NDB_BLOB_DEBUG
 #define DBG(x) \
@@ -290,6 +291,13 @@
     theNdbOp->theOperationType == NdbOperation::OpenRangeScanRequest;
 }
 
+inline bool
+NdbBlob::isTakeOverOp()
+{
+  return
+    TcKeyReq::getTakeOverScanFlag(theNdbOp->theScanInfo);
+}
+
 // computations (inline)
 
 inline Uint32
@@ -1218,8 +1226,22 @@
     if (isUpdateOp() || isWriteOp() || isDeleteOp()) {
       // add operation before this one to read head+inline
       NdbOperation* tOp = theNdbCon->getNdbOperation(theTable, theNdbOp);
+      /*
+       * If main op is from take over scan lock, the added read is done
+       * as committed read:
+       *
+       * In normal transactional case, the row is locked by us and
+       * committed read returns same as normal read.
+       *
+       * In current TRUNCATE TABLE, the deleting trans is committed in
+       * batches and then restarted with new trans id.  A normal read
+       * would hang on the scan delete lock and then fail.
+       */
+      NdbOperation::LockMode lockMode =
+        ! isTakeOverOp() ?
+          NdbOperation::LM_Read : NdbOperation::LM_CommittedRead;
       if (tOp == NULL ||
-          tOp->readTuple() == -1 ||
+          tOp->readTuple(lockMode) == -1 ||
           setTableKeyValue(tOp) == -1 ||
           getHeadInlineValue(tOp) == -1) {
         setErrorCode(tOp);
Thread
bk commit into 4.1 tree (pekka:1.2475) BUG#19201 — pekka, 2 May