
List: Commits
From: Alex Ivanov Notebook
Date: April 20 2006 10:03pm
Subject: bk commit into 5.1 tree (aivanov:1.2358)
Below is the list of changes that have just been committed into a local
5.1 repository of alexi. When alexi does a push, these changes will
be propagated to the main repository and, within 24 hours after the
push, to the public repository.
For information on how to access the public repository,
see http://dev.mysql.com/doc/mysql/en/installing-source-tree.html
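
For readers who want to follow along, a minimal sketch of tracking the
public tree with BitKeeper might look like the following (the repository
URL is illustrative only; the page above documents the actual location):

  bk clone bk://mysql.bkbits.net/mysql-5.1 mysql-5.1   # one-time checkout (URL is an assumption)
  cd mysql-5.1
  bk pull                                              # later, fetch newly pushed changesets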

ChangeSet
  1.2358 06/04/21 02:03:30 aivanov@stripped +11 -0
  Merge mysql.com:/home/alexi/innodb/mysql-5.0-work
  into  mysql.com:/home/alexi/innodb/mysql-5.1
   Null-merge.

  storage/innobase/row/row0mysql.c
    1.123 06/04/21 02:03:25 aivanov@stripped +0 -18
    Null-merge.

  storage/innobase/log/log0recv.c
    1.52 06/04/21 02:03:25 aivanov@stripped +3 -3
    Null-merge.

  storage/innobase/include/univ.i
    1.50 06/04/21 02:03:25 aivanov@stripped +0 -3
    Null-merge.

  storage/innobase/include/dict0mem.h
    1.28 06/04/21 02:03:25 aivanov@stripped +0 -7
    Null-merge.

  storage/innobase/include/dict0dict.h
    1.43 06/04/21 02:03:25 aivanov@stripped +0 -9
    Null-merge.

  storage/innobase/ibuf/ibuf0ibuf.c
    1.41 06/04/21 02:03:25 aivanov@stripped +3 -3
    Null-merge.

  storage/innobase/dict/dict0mem.c
    1.20 06/04/21 02:03:25 aivanov@stripped +0 -18
    Null-merge.

  storage/innobase/dict/dict0load.c
    1.46 06/04/21 02:03:25 aivanov@stripped +5 -10
    Null-merge.

  storage/innobase/dict/dict0dict.c
    1.79 06/04/21 02:03:25 aivanov@stripped +4 -34
    Null-merge.

  mysql-test/t/innodb.test
    1.138 06/04/21 02:03:25 aivanov@stripped +1 -5
    Null-merge.

  mysql-test/r/innodb.result
    1.171 06/04/21 02:03:25 aivanov@stripped +0 -3
    Null-merge.

  storage/innobase/row/row0mysql.c
    1.103.13.3 06/04/21 02:02:19 aivanov@stripped +0 -0
    Merge rename: innobase/row/row0mysql.c -> storage/innobase/row/row0mysql.c

  storage/innobase/log/log0recv.c
    1.46.4.2 06/04/21 02:02:19 aivanov@stripped +0 -0
    Merge rename: innobase/log/log0recv.c -> storage/innobase/log/log0recv.c

  storage/innobase/include/univ.i
    1.40.6.3 06/04/21 02:02:19 aivanov@stripped +0 -0
    Merge rename: innobase/include/univ.i -> storage/innobase/include/univ.i

  storage/innobase/include/dict0mem.h
    1.24.2.3 06/04/21 02:02:18 aivanov@stripped +0 -0
    Merge rename: innobase/include/dict0mem.h -> storage/innobase/include/dict0mem.h

  storage/innobase/include/dict0dict.h
    1.31.8.3 06/04/21 02:02:18 aivanov@stripped +0 -0
    Merge rename: innobase/include/dict0dict.h -> storage/innobase/include/dict0dict.h

  storage/innobase/ibuf/ibuf0ibuf.c
    1.36.3.2 06/04/21 02:02:18 aivanov@stripped +0 -0
    Merge rename: innobase/ibuf/ibuf0ibuf.c -> storage/innobase/ibuf/ibuf0ibuf.c

  storage/innobase/dict/dict0mem.c
    1.16.2.3 06/04/21 02:02:18 aivanov@stripped +0 -0
    Merge rename: innobase/dict/dict0mem.c -> storage/innobase/dict/dict0mem.c

  storage/innobase/dict/dict0load.c
    1.37.6.2 06/04/21 02:02:18 aivanov@stripped +0 -0
    Merge rename: innobase/dict/dict0load.c -> storage/innobase/dict/dict0load.c

  storage/innobase/dict/dict0dict.c
    1.65.10.3 06/04/21 02:02:18 aivanov@stripped +0 -0
    Merge rename: innobase/dict/dict0dict.c -> storage/innobase/dict/dict0dict.c

# This is a BitKeeper patch.  What follows are the unified diffs for the
# set of deltas contained in the patch.  The rest of the patch, the part
# that BitKeeper cares about, is below these diffs.
# User:	aivanov
# Host:	mysqld.localdomain
# Root:	/home/alexi/innodb/mysql-5.1/RESYNC

--- 1.65.10.2/innobase/dict/dict0dict.c	2006-04-21 01:07:32 +04:00
+++ 1.79/storage/innobase/dict/dict0dict.c	2006-04-21 02:03:25 +04:00
@@ -132,7 +132,7 @@
 /*=================*/
 				/* out: TRUE if success */
 	dict_table_t*	table,	/* in: table */
-	dict_index_t*	index);	/* in: index */	
+	dict_index_t*	index);	/* in: index */
 /***********************************************************************
 Builds the internal dictionary cache representation for a clustered
 index, containing also system fields not defined by the user. */
@@ -144,7 +144,7 @@
 				of the clustered index */
 	dict_table_t*	table,	/* in: table */
 	dict_index_t*	index);	/* in: user representation of a clustered
-				index */	
+				index */
 /***********************************************************************
 Builds the internal dictionary cache representation for a non-clustered
 index, containing also system fields not defined by the user. */
@@ -156,7 +156,7 @@
 				of the non-clustered index */
 	dict_table_t*	table,	/* in: table */
 	dict_index_t*	index);	/* in: user representation of a non-clustered
-				index */	
+				index */
 /**************************************************************************
 Removes a foreign constraint struct from the dictionary cache. */
 static
@@ -196,9 +196,10 @@
 /* Stream for storing detailed information about the latest foreign key
 and unique key errors */
 FILE*	dict_foreign_err_file		= NULL;
-mutex_t	dict_foreign_err_mutex; 	/* mutex protecting the foreign
+mutex_t	dict_foreign_err_mutex;		/* mutex protecting the foreign
 					and unique error buffers */
-	
+
+#ifndef UNIV_HOTBACKUP
 /**********************************************************************
 Makes all characters in a NUL-terminated UTF-8 string lower case. */
 
@@ -209,6 +210,7 @@
 {
 	innobase_casedn_str(a);
 }
+#endif /* !UNIV_HOTBACKUP */
 
 /************************************************************************
 Checks if the database name in two table names is the same. */
@@ -263,7 +265,7 @@
 	ut_a(s);
 	return(s - name);
 }
-	
+
 /************************************************************************
 Reserves the dictionary system mutex for MySQL. */
 
@@ -273,7 +275,7 @@
 {
 	mutex_enter(&(dict_sys->mutex));
 }
-	
+
 /************************************************************************
 Releases the dictionary system mutex for MySQL. */
 
@@ -283,7 +285,7 @@
 {
 	mutex_exit(&(dict_sys->mutex));
 }
-	
+
 /************************************************************************
 Decrements the count of open MySQL handles to a table. */
 
@@ -297,7 +299,7 @@
 	ut_a(table->n_mysql_handles_opened > 0);
 
 	table->n_mysql_handles_opened--;
-	
+
 	mutex_exit(&(dict_sys->mutex));
 }
 
@@ -350,7 +352,7 @@
 {
 	return(dict_table_get_index(table, name));
 }
-	
+
 /************************************************************************
 Initializes the autoinc counter. It is not an error to initialize an already
 initialized counter. */
@@ -390,7 +392,7 @@
 		value = table->autoinc;
 		table->autoinc = table->autoinc + 1;
 	}
-	
+
 	mutex_exit(&(table->autoinc_mutex));
 
 	return(value);
@@ -407,7 +409,7 @@
 	mutex_enter(&(table->autoinc_mutex));
 
 	table->autoinc = table->autoinc - 1;
-	
+
 	mutex_exit(&(table->autoinc_mutex));
 }
 
@@ -431,7 +433,7 @@
 	} else {
 		value = table->autoinc;
 	}
-	
+
 	mutex_exit(&(table->autoinc_mutex));
 
 	return(value);
@@ -476,7 +478,7 @@
 		if (value >= table->autoinc) {
 			table->autoinc = value + 1;
 		}
-	}	
+	}
 
 	mutex_exit(&(table->autoinc_mutex));
 }
@@ -497,7 +499,7 @@
 	dict_col_t*	col;
 	ulint		pos;
 	ulint		n_fields;
-	
+
 	ut_ad(index);
 	ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
 
@@ -509,7 +511,7 @@
 	}
 
 	n_fields = dict_index_get_n_fields(index);
-	
+
 	for (pos = 0; pos < n_fields; pos++) {
 		field = dict_index_get_nth_field(index, pos);
 
@@ -537,7 +539,7 @@
 	dict_col_t*	col;
 	ulint		pos;
 	ulint		n_fields;
-	
+
 	ut_ad(index);
 	ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
 
@@ -549,7 +551,7 @@
 	col = dict_table_get_nth_col(index->table, n);
 
 	n_fields = dict_index_get_n_fields(index);
-	
+
 	for (pos = 0; pos < n_fields; pos++) {
 		field = dict_index_get_nth_field(index, pos);
 
@@ -582,21 +584,21 @@
 	dict_field_t*	field2;
 	ulint		n_fields;
 	ulint		pos;
-	
+
 	ut_ad(index);
 	ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
 
 	field2 = dict_index_get_nth_field(index2, n);
 
 	n_fields = dict_index_get_n_fields(index);
-	
+
 	for (pos = 0; pos < n_fields; pos++) {
 		field = dict_index_get_nth_field(index, pos);
 
 		if (field->col == field2->col
-		    && (field->prefix_len == 0
+			&& (field->prefix_len == 0
 			|| (field->prefix_len >= field2->prefix_len
-			    && field2->prefix_len != 0))) {
+				&& field2->prefix_len != 0))) {
 
 			return(pos);
 		}
@@ -616,7 +618,7 @@
 	trx_t*	trx)		/* in: transaction handle */
 {
 	dict_table_t*	table;
-	
+
 	if (ut_dulint_cmp(table_id, DICT_FIELDS_ID) <= 0
 	   || trx->dict_operation_lock_mode == RW_X_LATCH) {
 		/* It is a system table which will always exist in the table
@@ -634,7 +636,7 @@
 	mutex_enter(&(dict_sys->mutex));
 
 	table = dict_table_get_on_id_low(table_id, trx);
-	
+
 	mutex_exit(&(dict_sys->mutex));
 
 	return(table);
@@ -672,7 +674,7 @@
 	dict_col_t*	col;
 	ulint		pos;
 	ulint		n_fields;
-	
+
 	ut_ad(table);
 
 	col = dict_table_get_nth_col(table, n);
@@ -680,7 +682,7 @@
 	index = dict_table_get_first_index(table);
 
 	n_fields = dict_index_get_n_unique(index);
-	
+
 	for (pos = 0; pos < n_fields; pos++) {
 		field = dict_index_get_nth_field(index, pos);
 
@@ -745,17 +747,17 @@
 	UT_NOT_USED(trx);
 
 	mutex_enter(&(dict_sys->mutex));
-	
+
 	table = dict_table_get_low(table_name);
 
 	mutex_exit(&(dict_sys->mutex));
 
 	if (table != NULL) {
-	        if (!table->stat_initialized) {
+		if (!table->stat_initialized) {
 			dict_update_statistics(table);
 		}
 	}
-	
+
 	return(table);
 }
 
@@ -775,22 +777,22 @@
 	UT_NOT_USED(trx);
 
 	mutex_enter(&(dict_sys->mutex));
-	
+
 	table = dict_table_get_low(table_name);
 
 	if (table != NULL) {
 
-	        table->n_mysql_handles_opened++;
+		table->n_mysql_handles_opened++;
 	}
 
 	mutex_exit(&(dict_sys->mutex));
 
 	if (table != NULL) {
-	        if (!table->stat_initialized && !table->ibd_file_missing) {
+		if (!table->stat_initialized && !table->ibd_file_missing) {
 			dict_update_statistics(table);
 		}
 	}
-	
+
 	return(table);
 }
 
@@ -805,7 +807,7 @@
 	ulint	fold;
 	ulint	id_fold;
 	ulint	i;
-	
+
 	ut_ad(table);
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad(mutex_own(&(dict_sys->mutex)));
@@ -813,12 +815,12 @@
 	ut_ad(table->n_def == table->n_cols - DATA_N_SYS_COLS);
 	ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
 	ut_ad(table->cached == FALSE);
-	
+
 	fold = ut_fold_string(table->name);
 	id_fold = ut_fold_dulint(table->id);
-	
+
 	table->cached = TRUE;
-	
+
 	/* NOTE: the system columns MUST be added in the following order
 	(so that they can be indexed by the numerical value of DATA_ROW_ID,
 	etc.) and as the last columns of the table memory object.
@@ -847,7 +849,7 @@
 #endif
 
 	/* This check reminds that if a new system column is added to
-	the program, it should be dealt with here */ 
+	the program, it should be dealt with here */
 #if DATA_N_SYS_COLS != 4
 #error "DATA_N_SYS_COLS != 4"
 #endif
@@ -909,7 +911,7 @@
 {
 	dict_table_t*	table;
 	dict_index_t*	index;
-	
+
 	table = UT_LIST_GET_FIRST(dict_sys->table_LRU);
 
 	while (table) {
@@ -951,16 +953,16 @@
 	char*		old_name;
 	ibool		success;
 	ulint		i;
-	
+
 	ut_ad(table);
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad(mutex_own(&(dict_sys->mutex)));
 #endif /* UNIV_SYNC_DEBUG */
 
 	old_size = mem_heap_get_size(table->heap);
-	
+
 	fold = ut_fold_string(new_name);
-	
+
 	/* Look for a table with the same name: error if such exists */
 	{
 		dict_table_t*	table2;
@@ -969,7 +971,7 @@
 		if (table2) {
 			fprintf(stderr,
 "InnoDB: Error: dictionary cache already contains a table of name %s\n",
-	 							     new_name);
+				new_name);
 			return(FALSE);
 		}
 	}
@@ -1018,7 +1020,7 @@
 
 	while (index != NULL) {
 		index->table_name = table->name;
-		
+
 		index = dict_table_get_next_index(index);
 	}
 
@@ -1030,7 +1032,7 @@
 		constraints from the dictionary cache here. The foreign key
 		constraints will be inherited to the new table from the
 		system tables through a call of dict_load_foreigns. */
-	
+
 		/* Remove the foreign constraints from the cache */
 		foreign = UT_LIST_GET_LAST(table->foreign_list);
 
@@ -1046,14 +1048,14 @@
 		while (foreign != NULL) {
 			foreign->referenced_table = NULL;
 			foreign->referenced_index = NULL;
-		
+
 			foreign = UT_LIST_GET_NEXT(referenced_list, foreign);
 		}
 
 		/* Make the list of referencing constraints empty */
 
 		UT_LIST_INIT(table->referenced_list);
-		
+
 		return(TRUE);
 	}
 
@@ -1085,10 +1087,10 @@
 			old_id = mem_strdup(foreign->id);
 
 			if (ut_strlen(foreign->id) > ut_strlen(old_name)
-						+ ((sizeof dict_ibfk) - 1)
-			    && 0 == ut_memcmp(foreign->id, old_name,
-						ut_strlen(old_name))
-			    && 0 == ut_memcmp(
+				+ ((sizeof dict_ibfk) - 1)
+				&& 0 == ut_memcmp(foreign->id, old_name,
+					ut_strlen(old_name))
+				&& 0 == ut_memcmp(
 					foreign->id + ut_strlen(old_name),
 					dict_ibfk, (sizeof dict_ibfk) - 1)) {
 
@@ -1096,11 +1098,11 @@
 
 				if (ut_strlen(table->name) > ut_strlen(old_name)) {
 					foreign->id = mem_heap_alloc(
-					     foreign->heap,
+						foreign->heap,
 						ut_strlen(table->name)
 						+ ut_strlen(old_id) + 1);
 				}
-				
+
 				/* Replace the prefix 'databasename/tablename'
 				with the new names */
 				strcpy(foreign->id, table->name);
@@ -1112,16 +1114,16 @@
 				db_len = dict_get_db_name_len(table->name) + 1;
 
 				if (dict_get_db_name_len(table->name)
-			    	    > dict_get_db_name_len(foreign->id)) {
+					> dict_get_db_name_len(foreign->id)) {
 
 					foreign->id = mem_heap_alloc(
-					     foreign->heap,
-				 	     db_len + ut_strlen(old_id) + 1);
+						foreign->heap,
+						db_len + ut_strlen(old_id) + 1);
 				}
 
 				/* Replace the database prefix in id with the
 				one from table->name */
-			
+
 				ut_memcpy(foreign->id, table->name, db_len);
 
 				strcpy(foreign->id + db_len,
@@ -1194,7 +1196,7 @@
 	dict_index_t*	index;
 	ulint		size;
 	ulint		i;
-	
+
 	ut_ad(table);
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad(mutex_own(&(dict_sys->mutex)));
@@ -1222,7 +1224,7 @@
 	while (foreign != NULL) {
 		foreign->referenced_table = NULL;
 		foreign->referenced_index = NULL;
-		
+
 		foreign = UT_LIST_GET_NEXT(referenced_list, foreign);
 	}
 
@@ -1249,13 +1251,15 @@
 	/* Remove table from LRU list of tables */
 	UT_LIST_REMOVE(table_LRU, dict_sys->table_LRU, table);
 
+	mutex_free(&(table->autoinc_mutex));
+
 	size = mem_heap_get_size(table->heap);
 
 	ut_ad(dict_sys->size >= size);
 
 	dict_sys->size -= size;
 
-	dict_mem_table_free(table);
+	mem_heap_free(table->heap);
 }
 
 /**************************************************************************
@@ -1306,7 +1310,7 @@
 	ut_ad(mutex_own(&(dict_sys->mutex)));
 #endif /* UNIV_SYNC_DEBUG */
 	ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
-	
+
 	fold = ut_fold_ulint_pair(ut_fold_string(table->name),
 				  ut_fold_string(col->name));
 
@@ -1316,7 +1320,7 @@
 		HASH_SEARCH(hash, dict_sys->col_hash, fold, col2,
 			(ut_strcmp(col->name, col2->name) == 0)
 			&& (ut_strcmp((col2->table)->name, table->name)
-							== 0));  
+							== 0));
 		ut_a(col2 == NULL);
 	}
 
@@ -1339,7 +1343,7 @@
 	ut_ad(mutex_own(&(dict_sys->mutex)));
 #endif /* UNIV_SYNC_DEBUG */
 	ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
-	
+
 	fold = ut_fold_ulint_pair(ut_fold_string(table->name),
 				  ut_fold_string(col->name));
 
@@ -1364,7 +1368,7 @@
 	ut_ad(mutex_own(&(dict_sys->mutex)));
 #endif /* UNIV_SYNC_DEBUG */
 	ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
-	
+
 	fold = ut_fold_ulint_pair(ut_fold_string(table->name),
 				  ut_fold_string(col->name));
 
@@ -1372,40 +1376,8 @@
 
 	fold = ut_fold_ulint_pair(ut_fold_string(new_name),
 				  ut_fold_string(col->name));
-				  
-	HASH_INSERT(dict_col_t, hash, dict_sys->col_hash, fold, col);
-}
 
-/********************************************************************
-If the given column name is reserved for InnoDB system columns, return
-TRUE. */
-
-ibool
-dict_col_name_is_reserved(
-/*======================*/
-				/* out: TRUE if name is reserved */
-	const char*	name)	/* in: column name */
-{
-	/* This check reminds that if a new system column is added to
-	the program, it should be dealt with here. */
-#if DATA_N_SYS_COLS != 4
-#error "DATA_N_SYS_COLS != 4"
-#endif
-
-	static const char*	reserved_names[] = {
-		"DB_ROW_ID", "DB_TRX_ID", "DB_ROLL_PTR", "DB_MIX_ID"
-	};
-
-	ulint			i;
-
-	for (i = 0; i < UT_ARR_SIZE(reserved_names); i++) {
-		if (strcmp(name, reserved_names[i]) == 0) {
-
-			return(TRUE);
-		}
-	}
-
-	return(FALSE);
+	HASH_INSERT(dict_col_t, hash, dict_sys->col_hash, fold, col);
 }
 
 /**************************************************************************
@@ -1427,14 +1399,14 @@
 	ulint		n_ord;
 	ibool		success;
 	ulint		i;
-	
+
 	ut_ad(index);
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad(mutex_own(&(dict_sys->mutex)));
 #endif /* UNIV_SYNC_DEBUG */
 	ut_ad(index->n_def == index->n_fields);
 	ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
-	
+
 	ut_ad(mem_heap_validate(index->heap));
 
 	{
@@ -1448,7 +1420,7 @@
 		}
 
 		ut_a(UT_LIST_GET_LEN(table->indexes) == 0
-	      			|| (index->type & DICT_CLUSTERED) == 0);
+				|| (index->type & DICT_CLUSTERED) == 0);
 	}
 
 	success = dict_index_find_cols(table, index);
@@ -1458,7 +1430,7 @@
 
 		return(FALSE);
 	}
-	
+
 	/* Build the cache internal representation of the index,
 	containing also the added system fields */
 
@@ -1469,15 +1441,15 @@
 	}
 
 	new_index->search_info = btr_search_info_create(new_index->heap);
-	
+
 	/* Set the n_fields value in new_index to the actual defined
 	number of fields in the cache internal representation */
 
 	new_index->n_fields = new_index->n_def;
-	
+
 	/* Add the new index as the last index for the table */
 
-	UT_LIST_ADD_LAST(indexes, table->indexes, new_index);	
+	UT_LIST_ADD_LAST(indexes, table->indexes, new_index);
 	new_index->table = table;
 	new_index->table_name = table->name;
 
@@ -1498,7 +1470,7 @@
 
 	if (table->type == DICT_TABLE_CLUSTER_MEMBER) {
 		/* The index tree is found from the cluster object */
-	    
+
 		cluster = dict_table_get_low(table->cluster_name);
 
 		tree = dict_index_get_tree(
@@ -1526,10 +1498,10 @@
 			new_index->stat_n_diff_key_vals[i] = 100;
 		}
 	}
-	
+
 	/* Add the index to the list of indexes stored in the tree */
-	UT_LIST_ADD_LAST(tree_indexes, tree->tree_indexes, new_index); 
-	
+	UT_LIST_ADD_LAST(tree_indexes, tree->tree_indexes, new_index);
+
 	/* If the dictionary cache grows too big, trim the table LRU list */
 
 	dict_sys->size += mem_heap_get_size(new_index->heap);
@@ -1581,7 +1553,7 @@
 
 	dict_sys->size -= size;
 
-	dict_mem_index_free(index);
+	mem_heap_free(index->heap);
 }
 
 /***********************************************************************
@@ -1593,13 +1565,13 @@
 /*=================*/
 				/* out: TRUE if success */
 	dict_table_t*	table,	/* in: table */
-	dict_index_t*	index)	/* in: index */	
+	dict_index_t*	index)	/* in: index */
 {
 	dict_col_t*	col;
 	dict_field_t*	field;
 	ulint		fold;
 	ulint		i;
-	
+
 	ut_ad(table && index);
 	ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
 #ifdef UNIV_SYNC_DEBUG
@@ -1610,15 +1582,15 @@
 		field = dict_index_get_nth_field(index, i);
 
 		fold = ut_fold_ulint_pair(ut_fold_string(table->name),
-				  	       ut_fold_string(field->name));
-			
+			ut_fold_string(field->name));
+
 		HASH_SEARCH(hash, dict_sys->col_hash, fold, col,
 				(ut_strcmp(col->name, field->name) == 0)
 				&& (ut_strcmp((col->table)->name, table->name)
-								== 0));  
+								== 0));
 		if (col == NULL) {
 
- 			return(FALSE);
+			return(FALSE);
 		} else {
 			field->col = col;
 		}
@@ -1626,7 +1598,7 @@
 
 	return(TRUE);
 }
-	
+
 /***********************************************************************
 Adds a column to index. */
 
@@ -1635,12 +1607,11 @@
 /*===============*/
 	dict_index_t*	index,		/* in: index */
 	dict_col_t*	col,		/* in: column */
-	ulint		order,		/* in: order criterion */
 	ulint		prefix_len)	/* in: column prefix length */
 {
 	dict_field_t*	field;
 
-	dict_mem_index_add_field(index, col->name, order, prefix_len);
+	dict_mem_index_add_field(index, col->name, prefix_len);
 
 	field = dict_index_get_nth_field(index, index->n_def - 1);
 
@@ -1662,17 +1633,6 @@
 	if (!(dtype_get_prtype(&col->type) & DATA_NOT_NULL)) {
 		index->n_nullable++;
 	}
-
-	if (index->n_def > 1) {
-		const dict_field_t*	field2 =
-			dict_index_get_nth_field(index, index->n_def - 2);
-		field->fixed_offs = (!field2->fixed_len ||
-					field2->fixed_offs == ULINT_UNDEFINED)
-				? ULINT_UNDEFINED
-				: field2->fixed_len + field2->fixed_offs;
-	} else {
-		field->fixed_offs = 0;
-	}
 }
 
 /***********************************************************************
@@ -1688,14 +1648,13 @@
 {
 	dict_field_t*	field;
 	ulint		i;
-	
+
 	/* Copy fields contained in index2 */
 
 	for (i = start; i < end; i++) {
 
 		field = dict_index_get_nth_field(index2, i);
-		dict_index_add_col(index1, field->col, field->order,
-						      field->prefix_len);
+		dict_index_add_col(index1, field->col, field->prefix_len);
 	}
 }
 
@@ -1762,7 +1721,7 @@
 				of the clustered index */
 	dict_table_t*	table,	/* in: table */
 	dict_index_t*	index)	/* in: user representation of a clustered
-				index */	
+				index */
 {
 	dict_index_t*	new_index;
 	dict_field_t*	field;
@@ -1780,26 +1739,24 @@
 
 	/* Create a new index object with certainly enough fields */
 	new_index = dict_mem_index_create(table->name,
-				     index->name,
-				     table->space,
-				     index->type,
-				     index->n_fields + table->n_cols);
+		index->name, table->space, index->type,
+		index->n_fields + table->n_cols);
 
 	/* Copy other relevant data from the old index struct to the new
 	struct: it inherits the values */
 
 	new_index->n_user_defined_cols = index->n_fields;
-	
+
 	new_index->id = index->id;
 
 	if (table->type != DICT_TABLE_ORDINARY) {
 		/* The index is mixed: copy common key prefix fields */
-		
+
 		dict_index_copy(new_index, index, 0, table->mix_len);
 
 		/* Add the mix id column */
 		dict_index_add_col(new_index,
-			  dict_table_get_sys_col(table, DATA_MIX_ID), 0, 0);
+			  dict_table_get_sys_col(table, DATA_MIX_ID), 0);
 
 		/* Copy the rest of fields */
 		dict_index_copy(new_index, index, table->mix_len,
@@ -1813,7 +1770,7 @@
 		/* No fixed number of fields determines an entry uniquely */
 
 		new_index->n_uniq = ULINT_MAX;
-		
+
 	} else if (index->type & DICT_UNIQUE) {
 		/* Only the fields defined so far are needed to identify
 		the index entry uniquely */
@@ -1831,21 +1788,27 @@
 
 		trx_id_pos = new_index->n_def;
 
-		ut_ad(DATA_ROW_ID == 0);
-		ut_ad(DATA_TRX_ID == 1);
-		ut_ad(DATA_ROLL_PTR == 2);
+#if DATA_ROW_ID != 0
+# error "DATA_ROW_ID != 0"
+#endif
+#if DATA_TRX_ID != 1
+# error "DATA_TRX_ID != 1"
+#endif
+#if DATA_ROLL_PTR != 2
+# error "DATA_ROLL_PTR != 2"
+#endif
 
 		if (!(index->type & DICT_UNIQUE)) {
 			dict_index_add_col(new_index,
-			   dict_table_get_sys_col(table, DATA_ROW_ID), 0, 0);
+			   dict_table_get_sys_col(table, DATA_ROW_ID), 0);
 			trx_id_pos++;
 		}
 
 		dict_index_add_col(new_index,
-			   dict_table_get_sys_col(table, DATA_TRX_ID), 0, 0);
-	
+			   dict_table_get_sys_col(table, DATA_TRX_ID), 0);
+
 		dict_index_add_col(new_index,
-			   dict_table_get_sys_col(table, DATA_ROLL_PTR), 0, 0);
+			   dict_table_get_sys_col(table, DATA_ROLL_PTR), 0);
 
 		for (i = 0; i < trx_id_pos; i++) {
 
@@ -1859,7 +1822,7 @@
 			}
 
 			if (dict_index_get_nth_field(new_index, i)->prefix_len
-			    > 0) {
+				> 0) {
 				new_index->trx_id_offset = 0;
 
 				break;
@@ -1887,10 +1850,10 @@
 
 		if (field->prefix_len == 0) {
 
-		        field->col->aux = 0;
+			field->col->aux = 0;
 		}
 	}
-	
+
 	/* Add to new_index non-system columns of table not yet included
 	there */
 	for (i = 0; i < table->n_cols - DATA_N_SYS_COLS; i++) {
@@ -1899,7 +1862,7 @@
 		ut_ad(col->type.mtype != DATA_SYS);
 
 		if (col->aux == ULINT_UNDEFINED) {
-			dict_index_add_col(new_index, col, 0, 0);
+			dict_index_add_col(new_index, col, 0);
 		}
 	}
 
@@ -1914,14 +1877,14 @@
 
 		if (field->prefix_len == 0) {
 
-		        field->col->clust_pos = i;
+			field->col->clust_pos = i;
 		}
 	}
-	
+
 	new_index->cached = TRUE;
 
 	return(new_index);
-}	
+}
 
 /***********************************************************************
 Builds the internal dictionary cache representation for a non-clustered
@@ -1934,7 +1897,7 @@
 				of the non-clustered index */
 	dict_table_t*	table,	/* in: table */
 	dict_index_t*	index)	/* in: user representation of a non-clustered
-				index */	
+				index */
 {
 	dict_field_t*	field;
 	dict_index_t*	new_index;
@@ -1950,24 +1913,21 @@
 
 	/* The clustered index should be the first in the list of indexes */
 	clust_index = UT_LIST_GET_FIRST(table->indexes);
-	
+
 	ut_ad(clust_index);
 	ut_ad(clust_index->type & DICT_CLUSTERED);
 	ut_ad(!(clust_index->type & DICT_UNIVERSAL));
 
 	/* Create a new index */
 	new_index = dict_mem_index_create(table->name,
-				     index->name,
-				     index->space,
-				     index->type,
-				     index->n_fields
-				     + 1 + clust_index->n_uniq);
+		index->name, index->space, index->type,
+		index->n_fields	+ 1 + clust_index->n_uniq);
 
 	/* Copy other relevant data from the old index
 	struct to the new struct: it inherits the values */
 
 	new_index->n_user_defined_cols = index->n_fields;
-	
+
 	new_index->id = index->id;
 
 	/* Copy fields from index to new_index */
@@ -1991,7 +1951,7 @@
 
 		if (field->prefix_len == 0) {
 
-		        field->col->aux = 0;
+			field->col->aux = 0;
 		}
 	}
 
@@ -2003,8 +1963,8 @@
 		field = dict_index_get_nth_field(clust_index, i);
 
 		if (field->col->aux == ULINT_UNDEFINED) {
-			dict_index_add_col(new_index, field->col, 0,
-						      field->prefix_len);
+			dict_index_add_col(new_index, field->col,
+				field->prefix_len);
 		}
 	}
 
@@ -2022,7 +1982,7 @@
 	new_index->cached = TRUE;
 
 	return(new_index);
-}	
+}
 
 /*====================== FOREIGN KEY PROCESSING ========================*/
 
@@ -2037,7 +1997,7 @@
 	dict_table_t*	table)	/* in: InnoDB table */
 {
 	if (UT_LIST_GET_LEN(table->referenced_list) > 0) {
-		
+
 		return(TRUE);
 	}
 
@@ -2067,7 +2027,7 @@
 	ut_ad(mutex_own(&(dict_sys->mutex)));
 #endif /* UNIV_SYNC_DEBUG */
 	ut_a(foreign);
-	
+
 	if (foreign->referenced_table) {
 		UT_LIST_REMOVE(referenced_list,
 			foreign->referenced_table->referenced_list, foreign);
@@ -2108,7 +2068,7 @@
 
 		foreign = UT_LIST_GET_NEXT(foreign_list, foreign);
 	}
-	
+
 	foreign = UT_LIST_GET_FIRST(table->referenced_list);
 
 	while (foreign) {
@@ -2121,7 +2081,7 @@
 	}
 
 	return(NULL);
-}	
+}
 
 /*************************************************************************
 Tries to find an index whose first fields are the columns in the array,
@@ -2144,7 +2104,7 @@
 	dict_index_t*	index;
 	const char*	col_name;
 	ulint		i;
-	
+
 	index = dict_table_get_first_index(table);
 
 	while (index != NULL) {
@@ -2157,22 +2117,22 @@
 						->prefix_len != 0) {
 					/* We do not accept column prefix
 					indexes here */
-					
+
 					break;
 				}
 
 				if (0 != innobase_strcasecmp(columns[i],
 								col_name)) {
-				  	break;
+					break;
 				}
 
 				if (types_idx && !cmp_types_are_equal(
-				     dict_index_get_nth_type(index, i),
-				     dict_index_get_nth_type(types_idx, i),
-				     check_charsets)) {
+					    dict_index_get_nth_type(index, i),
+					    dict_index_get_nth_type(types_idx, i),
+					    check_charsets)) {
 
-				  	break;
-				}		
+					break;
+				}
 			}
 
 			if (i == n_cols) {
@@ -2191,6 +2151,7 @@
 	InnoDB Hot Backup builds.  Besides, this function should never
 	be called in InnoDB Hot Backup. */
 	ut_error;
+	return(NULL);
 #endif /* UNIV_HOTBACKUP */
 }
 
@@ -2255,7 +2216,7 @@
 	dict_foreign_t*	for_in_cache		= NULL;
 	dict_index_t*	index;
 	ibool		added_to_referenced_list= FALSE;
-	FILE*		ef 			= dict_foreign_err_file;
+	FILE*		ef			= dict_foreign_err_file;
 
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad(mutex_own(&(dict_sys->mutex)));
@@ -2263,7 +2224,7 @@
 
 	for_table = dict_table_check_if_in_cache_low(
 					foreign->foreign_table_name);
-	
+
 	ref_table = dict_table_check_if_in_cache_low(
 					foreign->referenced_table_name);
 	ut_a(for_table || ref_table);
@@ -2299,7 +2260,7 @@
 				mem_heap_free(foreign->heap);
 			}
 
-		    	return(DB_CANNOT_ADD_CONSTRAINT);
+			return(DB_CANNOT_ADD_CONSTRAINT);
 		}
 
 		for_in_cache->referenced_table = ref_table;
@@ -2328,11 +2289,11 @@
 						ref_table->referenced_list,
 						for_in_cache);
 				}
-			
+
 				mem_heap_free(foreign->heap);
 			}
 
-		    	return(DB_CANNOT_ADD_CONSTRAINT);
+			return(DB_CANNOT_ADD_CONSTRAINT);
 		}
 
 		for_in_cache->foreign_table = for_table;
@@ -2405,15 +2366,15 @@
 	const char*	old_ptr2;
 
 	*success = FALSE;
-	
+
 	while (isspace(*ptr)) {
 		ptr++;
 	}
 
 	old_ptr2 = ptr;
-	
+
 	ptr = dict_scan_to(ptr, string);
-	
+
 	if (*ptr == '\0' || old_ptr2 != ptr) {
 		return(old_ptr);
 	}
@@ -2483,8 +2444,8 @@
 		}
 	} else {
 		while (!isspace(*ptr) && *ptr != '(' && *ptr != ')'
-		       && (accept_also_dot || *ptr != '.')
-		       && *ptr != ',' && *ptr != '\0') {
+			&& (accept_also_dot || *ptr != '.')
+			&& *ptr != ',' && *ptr != '\0') {
 
 			ptr++;
 		}
@@ -2521,9 +2482,9 @@
 
 		b = (byte*)(*id);
 		id_len = strlen((char*) b);
-		
+
 		if (id_len >= 3 && b[id_len - 1] == 0xA0
-			       && b[id_len - 2] == 0xC2) {
+			&& b[id_len - 2] == 0xC2) {
 
 			/* Strip the 2 last bytes */
 
@@ -2566,28 +2527,29 @@
 		*success = TRUE;
 		*column = NULL;
 	} else {
-	    	for (i = 0; i < dict_table_get_n_cols(table); i++) {
+		for (i = 0; i < dict_table_get_n_cols(table); i++) {
 
 			col = dict_table_get_nth_col(table, i);
 
 			if (0 == innobase_strcasecmp(col->name, *name)) {
-		    		/* Found */
+				/* Found */
 
-		    		*success = TRUE;
-		    		*column = col;
-		    		strcpy((char*) *name, col->name);
+				*success = TRUE;
+				*column = col;
+				strcpy((char*) *name, col->name);
 
-		    		break;
+				break;
 			}
 		}
 	}
-	
+
 	return(ptr);
 #else /* UNIV_HOTBACKUP */
 	/* This function depends on MySQL code that is not included in
 	InnoDB Hot Backup builds.  Besides, this function should never
 	be called in InnoDB Hot Backup. */
 	ut_error;
+	return(NULL);
 #endif /* UNIV_HOTBACKUP */
 }
 
@@ -2616,11 +2578,11 @@
 
 	*success = FALSE;
 	*table = NULL;
-	
-	ptr = dict_scan_id(ptr, heap, &scan_name, FALSE);	
+
+	ptr = dict_scan_id(ptr, heap, &scan_name, FALSE);
 
 	if (scan_name == NULL) {
-		
+
 		return(ptr);	/* Syntax error */
 	}
 
@@ -2693,6 +2655,7 @@
 	InnoDB Hot Backup builds.  Besides, this function should never
 	be called in InnoDB Hot Backup. */
 	ut_error;
+	return(NULL);
 #endif /* UNIV_HOTBACKUP */
 }
 
@@ -2708,7 +2671,7 @@
 				left in string or a syntax error */
 {
 	const char*	start;
-	
+
 	*success = FALSE;
 
 	ptr = dict_scan_id(ptr, NULL, &start, TRUE);
@@ -2716,7 +2679,7 @@
 	if (start) {
 		*success = TRUE;
 	}
-	
+
 	return(ptr);
 }
 
@@ -2738,8 +2701,8 @@
 	char*		str;
 	const char*	sptr;
 	char*		ptr;
- 	/* unclosed quote character (0 if none) */
- 	char		quote	= 0;
+	/* unclosed quote character (0 if none) */
+	char		quote	= 0;
 
 	str = mem_alloc(strlen(sql_string) + 1);
 
@@ -2767,15 +2730,15 @@
 			/* Starting quote: remember the quote character. */
 			quote = *sptr;
 		} else if (*sptr == '#'
-                           || (sptr[0] == '-' && sptr[1] == '-' &&
-                               sptr[2] == ' ')) {
+			|| (sptr[0] == '-' && sptr[1] == '-' &&
+				sptr[2] == ' ')) {
 			for (;;) {
 				/* In Unix a newline is 0x0A while in Windows
 				it is 0x0D followed by 0x0A */
 
 				if (*sptr == (char)0x0A
-				    || *sptr == (char)0x0D
-				    || *sptr == '\0') {
+					|| *sptr == (char)0x0D
+					|| *sptr == '\0') {
 
 					goto scan_more;
 				}
@@ -2786,7 +2749,7 @@
 			for (;;) {
 				if (*sptr == '*' && *(sptr + 1) == '/') {
 
-				     	sptr += 2;
+					sptr += 2;
 
 					goto scan_more;
 				}
@@ -2832,10 +2795,10 @@
 
 	while (foreign) {
 		if (ut_strlen(foreign->id) > ((sizeof dict_ibfk) - 1) + len
-		    && 0 == ut_memcmp(foreign->id, table->name, len)
-		    && 0 == ut_memcmp(foreign->id + len,
+			&& 0 == ut_memcmp(foreign->id, table->name, len)
+			&& 0 == ut_memcmp(foreign->id + len,
 				dict_ibfk, (sizeof dict_ibfk) - 1)
-		    && foreign->id[len + ((sizeof dict_ibfk) - 1)] != '0') {
+			&& foreign->id[len + ((sizeof dict_ibfk) - 1)] != '0') {
 			/* It is of the >= 4.0.18 format */
 
 			id = strtoul(foreign->id + len + ((sizeof dict_ibfk) - 1),
@@ -2909,7 +2872,7 @@
 	ulint		highest_id_so_far	= 0;
 	dict_index_t*	index;
 	dict_foreign_t*	foreign;
- 	const char*	ptr			= sql_string;
+	const char*	ptr			= sql_string;
 	const char*	start_of_latest_foreign	= sql_string;
 	FILE*		ef			= dict_foreign_err_file;
 	const char*	constraint_name;
@@ -2925,7 +2888,7 @@
 	dict_col_t*	columns[500];
 	const char*	column_names[500];
 	const char*	referenced_table_name;
-	
+
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad(mutex_own(&(dict_sys->mutex)));
 #endif /* UNIV_SYNC_DEBUG */
@@ -3007,7 +2970,7 @@
 		ut_a(success);
 
 		if (!isspace(*ptr) && *ptr != '"' && *ptr != '`') {
-	        	goto loop;
+			goto loop;
 		}
 
 		while (isspace(*ptr)) {
@@ -3030,15 +2993,14 @@
 		   command, determine if there are any foreign keys, and
 		   if so, immediately reject the command if the table is a
 		   temporary one. For now, this kludge will work. */
-		if (reject_fks && (UT_LIST_GET_LEN(table->foreign_list) > 0))
-		{
+		if (reject_fks && (UT_LIST_GET_LEN(table->foreign_list) > 0)) {
 			return DB_CANNOT_ADD_CONSTRAINT;
 		}
-		
+
 		/**********************************************************/
 		/* The following call adds the foreign key constraints
 		to the data dictionary system tables on disk */
-		
+
 		error = dict_create_add_foreigns_to_dictionary(
 						highest_id_so_far, table, trx);
 		return(error);
@@ -3046,14 +3008,14 @@
 
 	start_of_latest_foreign = ptr;
 
-	ptr = dict_accept(ptr, "FOREIGN", &success);		
-	
+	ptr = dict_accept(ptr, "FOREIGN", &success);
+
 	if (!success) {
 		goto loop;
 	}
 
 	if (!isspace(*ptr)) {
-	        goto loop;
+		goto loop;
 	}
 
 	ptr = dict_accept(ptr, "KEY", &success);
@@ -3082,7 +3044,7 @@
 			/* We do not flag a syntax error here because in an
 			ALTER TABLE we may also have DROP FOREIGN KEY abc */
 
-		        goto loop;
+			goto loop;
 		}
 	}
 
@@ -3104,13 +3066,13 @@
 	}
 
 	i++;
-	
+
 	ptr = dict_accept(ptr, ",", &success);
 
 	if (success) {
 		goto col_loop1;
 	}
-	
+
 	ptr = dict_accept(ptr, ")", &success);
 
 	if (!success) {
@@ -3151,7 +3113,7 @@
 	foreign = dict_mem_foreign_create();
 
 	if (constraint_name) {
-		ulint	db_len;	
+		ulint	db_len;
 
 		/* Catenate 'databasename/' to the constraint name specified
 		by the user: we conceive the constraint as belonging to the
@@ -3179,7 +3141,7 @@
 		foreign->foreign_col_names[i] =
 			mem_heap_strdup(foreign->heap, columns[i]->name);
 	}
-	
+
 	ptr = dict_scan_table_name(ptr, &referenced_table, name,
 				&success, heap, &referenced_table_name);
 
@@ -3198,7 +3160,7 @@
 
 		return(DB_CANNOT_ADD_CONSTRAINT);
 	}
-	
+
 	ptr = dict_accept(ptr, "(", &success);
 
 	if (!success) {
@@ -3215,7 +3177,7 @@
 	ptr = dict_scan_col(ptr, &success, referenced_table, columns + i,
 				heap, column_names + i);
 	i++;
-	
+
 	if (!success) {
 		dict_foreign_free(foreign);
 
@@ -3234,12 +3196,12 @@
 	if (success) {
 		goto col_loop2;
 	}
-	
+
 	ptr = dict_accept(ptr, ")", &success);
 
 	if (!success || foreign->n_fields != i) {
 		dict_foreign_free(foreign);
-		
+
 		dict_foreign_report_syntax_err(name, start_of_latest_foreign,
 									ptr);
 		return(DB_CANNOT_ADD_CONSTRAINT);
@@ -3247,7 +3209,7 @@
 
 	n_on_deletes = 0;
 	n_on_updates = 0;
-	
+
 scan_on_conditions:
 	/* Loop here as long as we can find ON ... conditions */
 
@@ -3265,7 +3227,7 @@
 
 		if (!success) {
 			dict_foreign_free(foreign);
-		
+
 			dict_foreign_report_syntax_err(name,
 						start_of_latest_foreign, ptr);
 			return(DB_CANNOT_ADD_CONSTRAINT);
@@ -3305,7 +3267,7 @@
 			dict_foreign_free(foreign);
 			dict_foreign_report_syntax_err(name,
 					start_of_latest_foreign, ptr);
-		
+
 			return(DB_CANNOT_ADD_CONSTRAINT);
 		}
 
@@ -3362,13 +3324,13 @@
 	} else {
 		foreign->type |= DICT_FOREIGN_ON_UPDATE_SET_NULL;
 	}
-	
+
 	goto scan_on_conditions;
 
 try_find_index:
 	if (n_on_deletes > 1 || n_on_updates > 1) {
 		/* It is an error to define more than 1 action */
-		
+
 		dict_foreign_free(foreign);
 
 		mutex_enter(&dict_foreign_err_mutex);
@@ -3416,7 +3378,7 @@
 
 	foreign->referenced_table_name = mem_heap_strdup(foreign->heap,
 						referenced_table_name);
-					
+
 	foreign->referenced_col_names = mem_heap_alloc(foreign->heap,
 							i * sizeof(void*));
 	for (i = 0; i < foreign->n_fields; i++) {
@@ -3425,7 +3387,7 @@
 	}
 
 	/* We found an ok constraint definition: add to the lists */
-	
+
 	UT_LIST_ADD_LAST(foreign_list, table->foreign_list, foreign);
 
 	if (referenced_table) {
@@ -3477,7 +3439,7 @@
 	mem_heap_free(heap);
 	mem_free(str);
 
-	return(err);	
+	return(err);
 }
 
 /**************************************************************************
@@ -3505,7 +3467,7 @@
 	const char*	ptr;
 	const char*	id;
 	FILE*		ef	= dict_foreign_err_file;
-	
+
 	*n = 0;
 
 	*constraints_to_drop = mem_heap_alloc(heap, 1000 * sizeof(char*));
@@ -3521,7 +3483,7 @@
 
 	if (*ptr == '\0') {
 		mem_free(str);
-		
+
 		return(DB_SUCCESS);
 	}
 
@@ -3529,14 +3491,14 @@
 
 	if (!isspace(*ptr)) {
 
-	        goto loop;
+		goto loop;
 	}
 
 	ptr = dict_accept(ptr, "FOREIGN", &success);
-	
+
 	if (!success) {
 
-	        goto loop;
+		goto loop;
 	}
 
 	ptr = dict_accept(ptr, "KEY", &success);
@@ -3556,20 +3518,20 @@
 	ut_a(*n < 1000);
 	(*constraints_to_drop)[*n] = id;
 	(*n)++;
-	
+
 	/* Look for the given constraint id */
 
 	foreign = UT_LIST_GET_FIRST(table->foreign_list);
 
 	while (foreign != NULL) {
 		if (0 == strcmp(foreign->id, id)
-		    || (strchr(foreign->id, '/')
-			&& 0 == strcmp(id,
+			|| (strchr(foreign->id, '/')
+				&& 0 == strcmp(id,
 					dict_remove_db_name(foreign->id)))) {
 			/* Found */
 			break;
 		}
-		
+
 		foreign = UT_LIST_GET_NEXT(foreign_list, foreign);
 	}
 
@@ -3593,7 +3555,7 @@
 		return(DB_CANNOT_DROP_CONSTRAINT);
 	}
 
-	goto loop;	
+	goto loop;
 
 syntax_error:
 	mutex_enter(&dict_foreign_err_mutex);
@@ -3630,7 +3592,7 @@
 	}
 
 	mutex_enter(&(dict_sys->mutex));
-	
+
 	table = UT_LIST_GET_FIRST(dict_sys->table_LRU);
 
 	while (table) {
@@ -3678,7 +3640,7 @@
 	tree->page = page_no;
 
 	tree->id = index->id;
-	
+
 	UT_LIST_INIT(tree->tree_indexes);
 
 	tree->magic_n = DICT_TREE_MAGIC_N;
@@ -3720,16 +3682,16 @@
 	dict_table_t*	table;
 	dulint		mix_id;
 	ulint		len;
-	
+
 	index = UT_LIST_GET_FIRST(tree->tree_indexes);
 	ut_ad(index);
 	table = index->table;
-	
+
 	if ((index->type & DICT_CLUSTERED)
 			&& UNIV_UNLIKELY(table->type != DICT_TABLE_ORDINARY)) {
 
 		/* Get the mix id of the record */
-		ut_a(!table->comp);
+		ut_a(!dict_table_is_comp(table));
 
 		mix_id = mach_dulint_read_compressed(
 			rec_get_nth_field_old(rec, table->mix_len, &len));
@@ -3757,9 +3719,9 @@
 				index */
 {
 	dict_index_t*	index;
-	
+
 	index = dict_tree_find_index_low(tree, rec);
-	
+
 	return(index);
 }
 
@@ -3780,7 +3742,7 @@
 	dulint		mix_id;
 
 	ut_ad(dtuple_check_typed(tuple));
-	
+
 	if (UT_LIST_GET_LEN(tree->tree_indexes) == 1) {
 
 		return(UT_LIST_GET_FIRST(tree->tree_indexes));
@@ -3825,7 +3787,7 @@
 	byte*	mix_id_field;
 	ulint	len;
 
-	ut_ad(!table->comp);
+	ut_ad(!dict_table_is_comp(table));
 
 	mix_id_field = rec_get_nth_field_old(rec,
 					table->mix_len, &len);
@@ -3871,7 +3833,7 @@
 				pointer */
 	ulint		page_no,/* in: page number to put in node pointer */
 	mem_heap_t*	heap,	/* in: memory heap where pointer created */
-	ulint           level)  /* in: level of rec in tree: 0 means leaf
+	ulint		level)	/* in: level of rec in tree: 0 means leaf
 				level */
 {
 	dtuple_t*	tuple;
@@ -3881,21 +3843,21 @@
 	ulint		n_unique;
 
 	ind = dict_tree_find_index_low(tree, rec);
-	
+
 	if (UNIV_UNLIKELY(tree->type & DICT_UNIVERSAL)) {
 		/* In a universal index tree, we take the whole record as
 		the node pointer if the reord is on the leaf level,
 		on non-leaf levels we remove the last field, which
 		contains the page number of the child page */
 
-		ut_a(!ind->table->comp);
+		ut_a(!dict_table_is_comp(ind->table));
 		n_unique = rec_get_n_fields_old(rec);
 
 		if (level > 0) {
-		        ut_a(n_unique > 1);
-		        n_unique--;
+			ut_a(n_unique > 1);
+			n_unique--;
 		}
-	} else {	
+	} else {
 		n_unique = dict_index_get_n_unique_in_tree(ind);
 	}
 
@@ -3906,15 +3868,15 @@
 	levels in the tree there may be identical node pointers with a
 	different page number; therefore, we set the n_fields_cmp to one
 	less: */
-	
+
 	dtuple_set_n_fields_cmp(tuple, n_unique);
 
 	dict_index_copy_types(tuple, ind, n_unique);
-	
+
 	buf = mem_heap_alloc(heap, 4);
 
 	mach_write_to_4(buf, page_no);
-	
+
 	field = dtuple_get_nth_field(tuple, n_unique);
 	dfield_set_data(field, buf, 4);
 
@@ -3927,8 +3889,8 @@
 	ut_ad(dtuple_check_typed(tuple));
 
 	return(tuple);
-}	
-	
+}
+
 /**************************************************************************
 Copies an initial segment of a physical record, long enough to specify an
 index entry uniquely. */
@@ -3951,7 +3913,7 @@
 	index = dict_tree_find_index_low(tree, rec);
 
 	if (UNIV_UNLIKELY(tree->type & DICT_UNIVERSAL)) {
-		ut_a(!index->table->comp);
+		ut_a(!dict_table_is_comp(index->table));
 		n = rec_get_n_fields_old(rec);
 	} else {
 		n = dict_index_get_n_unique_in_tree(index);
@@ -3978,9 +3940,10 @@
 
 	ind = dict_tree_find_index_low(tree, rec);
 
-	ut_ad(ind->table->comp || n_fields <= rec_get_n_fields_old(rec));
-	
-	tuple = dtuple_create(heap, n_fields); 
+	ut_ad(dict_table_is_comp(ind->table)
+		|| n_fields <= rec_get_n_fields_old(rec));
+
+	tuple = dtuple_create(heap, n_fields);
 
 	dict_index_copy_types(tuple, ind, n_fields);
 
@@ -3989,8 +3952,8 @@
 	ut_ad(dtuple_check_typed(tuple));
 
 	return(tuple);
-}	
-	
+}
+
 /*************************************************************************
 Calculates the minimum record length in an index. */
 
@@ -4002,7 +3965,7 @@
 	ulint	sum	= 0;
 	ulint	i;
 
-	if (UNIV_LIKELY(index->table->comp)) {
+	if (dict_table_is_comp(index->table)) {
 		ulint nullable = 0;
 		sum = REC_N_NEW_EXTRA_BYTES;
 		for (i = 0; i < dict_index_get_n_fields(index); i++) {
@@ -4047,8 +4010,8 @@
 /*=======================*/
 	dict_table_t*	table,		/* in: table */
 	ibool		has_dict_mutex __attribute__((unused)))
-                                        /* in: TRUE if the caller has the
-					dictionary mutex */	
+					/* in: TRUE if the caller has the
+					dictionary mutex */
 {
 	dict_index_t*	index;
 	ulint		size;
@@ -4077,11 +4040,11 @@
 	/* Find out the sizes of the indexes and how many different values
 	for the key they approximately have */
 
-	index = dict_table_get_first_index(table);	
+	index = dict_table_get_first_index(table);
 
 	if (index == NULL) {
 		/* Table definition is corrupt */
-	
+
 		return;
 	}
 
@@ -4100,7 +4063,7 @@
 		}
 
 		index->stat_n_leaf_pages = size;
-		
+
 		btr_estimate_number_of_different_key_vals(index);
 
 		index = dict_table_get_next_index(index);
@@ -4118,7 +4081,7 @@
 
 	table->stat_initialized = TRUE;
 
-        table->stat_modified_counter = 0;
+	table->stat_modified_counter = 0;
 }
 
 /*************************************************************************
@@ -4157,7 +4120,7 @@
 	fprintf(stderr, " )\n"
 		"             REFERENCES %s (",
 		foreign->referenced_table_name);
-	
+
 	for (i = 0; i < foreign->n_fields; i++) {
 		fprintf(stderr, " %s", foreign->referenced_col_names[i]);
 	}
@@ -4193,7 +4156,7 @@
 	table = dict_table_get_low(name);
 
 	ut_a(table);
-	
+
 	dict_table_print_low(table);
 	mutex_exit(&(dict_sys->mutex));
 }
@@ -4215,7 +4178,7 @@
 #endif /* UNIV_SYNC_DEBUG */
 
 	dict_update_statistics_low(table, TRUE);
-	
+
 	fprintf(stderr,
 "--------------------------------------\n"
 "TABLE: name %s, id %lu %lu, columns %lu, indexes %lu, appr.rows %lu\n"
@@ -4224,7 +4187,7 @@
 			(ulong) ut_dulint_get_high(table->id),
 			(ulong) ut_dulint_get_low(table->id),
 			(ulong) table->n_cols,
-		        (ulong) UT_LIST_GET_LEN(table->indexes),
+			(ulong) UT_LIST_GET_LEN(table->indexes),
 			(ulong) table->stat_n_rows);
 
 	for (i = 0; i < table->n_cols - 1; i++) {
@@ -4302,7 +4265,7 @@
 	}
 
 	fprintf(stderr,
-		"  INDEX: name %s, id %lu %lu, fields %lu/%lu, type %lu\n"
+		"  INDEX: name %s, id %lu %lu, fields %lu/%lu, uniq %lu, type %lu\n"
 		"   root page %lu, appr.key vals %lu,"
 		" leaf pages %lu, size pages %lu\n"
 		"   FIELDS: ",
@@ -4310,12 +4273,14 @@
 		(ulong) ut_dulint_get_high(tree->id),
 		(ulong) ut_dulint_get_low(tree->id),
 		(ulong) index->n_user_defined_cols,
-		(ulong) index->n_fields, (ulong) index->type,
+		(ulong) index->n_fields,
+		(ulong) index->n_uniq,
+		(ulong) index->type,
 		(ulong) tree->page,
 		(ulong) n_vals,
 		(ulong) index->stat_n_leaf_pages,
 		(ulong) index->stat_index_size);
-			
+
 	for (i = 0; i < index->n_fields; i++) {
 		dict_field_print_low(dict_index_get_nth_field(index, i));
 	}
@@ -4361,7 +4326,7 @@
 {
 	const char*	stripped_id;
 	ulint	i;
-	
+
 	if (strchr(foreign->id, '/')) {
 		/* Strip the preceding database name from the constraint id */
 		stripped_id = foreign->id + 1
@@ -4371,14 +4336,14 @@
 	}
 
 	putc(',', file);
-	
+
 	if (add_newline) {
 		/* SHOW CREATE TABLE wants constraints each printed nicely
 		on its own line, while error messages want no newlines
 		inserted. */
 		fputs("\n ", file);
 	}
-	
+
 	fputs(" CONSTRAINT ", file);
 	ut_print_name(file, trx, stripped_id);
 	fputs(" FOREIGN KEY (", file);
@@ -4387,7 +4352,7 @@
 		ut_print_name(file, trx, foreign->foreign_col_names[i]);
 		if (++i < foreign->n_fields) {
 			fputs(", ", file);
-	        } else {
+		} else {
 			break;
 		}
 	}
@@ -4430,7 +4395,7 @@
 	if (foreign->type & DICT_FOREIGN_ON_DELETE_CASCADE) {
 		fputs(" ON DELETE CASCADE", file);
 	}
-	
+
 	if (foreign->type & DICT_FOREIGN_ON_DELETE_SET_NULL) {
 		fputs(" ON DELETE SET NULL", file);
 	}
@@ -4442,7 +4407,7 @@
 	if (foreign->type & DICT_FOREIGN_ON_UPDATE_CASCADE) {
 		fputs(" ON UPDATE CASCADE", file);
 	}
-	
+
 	if (foreign->type & DICT_FOREIGN_ON_UPDATE_SET_NULL) {
 		fputs(" ON UPDATE SET NULL", file);
 	}
@@ -4513,7 +4478,7 @@
 			if (foreign->type == DICT_FOREIGN_ON_DELETE_CASCADE) {
 				fputs(" ON DELETE CASCADE", file);
 			}
-	
+
 			if (foreign->type == DICT_FOREIGN_ON_DELETE_SET_NULL) {
 				fputs(" ON DELETE SET NULL", file);
 			}
@@ -4525,7 +4490,7 @@
 			if (foreign->type & DICT_FOREIGN_ON_UPDATE_CASCADE) {
 				fputs(" ON UPDATE CASCADE", file);
 			}
-	
+
 			if (foreign->type & DICT_FOREIGN_ON_UPDATE_SET_NULL) {
 				fputs(" ON UPDATE SET NULL", file);
 			}
@@ -4554,4 +4519,16 @@
 	ut_print_name(file, trx, index->name);
 	fputs(" of table ", file);
 	ut_print_name(file, trx, index->table_name);
+}
+
+/************************************************************************
+Export an inlined function for use in ha_innodb.c. */
+ibool
+innodb_dict_table_is_comp(
+/*===============*/
+					/* out: TRUE if table uses the
+					compact page format */
+	const dict_table_t*	table)	/* in: table */
+{
+	return dict_table_is_comp(table);
 }

--- 1.37.6.1/innobase/dict/dict0load.c	2006-04-21 01:07:32 +04:00
+++ 1.46/storage/innobase/dict/dict0load.c	2006-04-21 02:03:25 +04:00
@@ -47,18 +47,18 @@
 	byte*		field;
 	ulint		len;
 	mtr_t		mtr;
-	
+
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad(mutex_own(&(dict_sys->mutex)));
 #endif /* UNIV_SYNC_DEBUG */
 
 	heap = mem_heap_create(1000);
-	
+
 	mtr_start(&mtr);
 
 	sys_tables = dict_table_get_low("SYS_TABLES");
 	sys_index = UT_LIST_GET_FIRST(sys_tables->indexes);
-	ut_a(!sys_tables->comp);
+	ut_a(!dict_table_is_comp(sys_tables));
 
 	tuple = dtuple_create(heap, 1);
 	dfield = dtuple_get_nth_field(tuple, 0);
@@ -77,36 +77,36 @@
 		btr_pcur_close(&pcur);
 		mtr_commit(&mtr);
 		mem_heap_free(heap);
-		
+
 		return(NULL);
-	}	
+	}
 
 	field = rec_get_nth_field_old(rec, 0, &len);
 
 	if (len < strlen(name)
-	    || ut_memcmp(name, field, strlen(name)) != 0) {
+		|| ut_memcmp(name, field, strlen(name)) != 0) {
 		/* Not found */
 
 		btr_pcur_close(&pcur);
 		mtr_commit(&mtr);
 		mem_heap_free(heap);
-		
+
 		return(NULL);
 	}
 
-	if (!rec_get_deleted_flag(rec, sys_tables->comp)) {
+	if (!rec_get_deleted_flag(rec, 0)) {
 
 		/* We found one */
 
-                char*	table_name = mem_strdupl((char*) field, len);
-		
+		char*	table_name = mem_strdupl((char*) field, len);
+
 		btr_pcur_close(&pcur);
 		mtr_commit(&mtr);
 		mem_heap_free(heap);
-		
+
 		return(table_name);
 	}
-	
+
 	btr_pcur_move_to_next_user_rec(&pcur, &mtr);
 
 	goto loop;
@@ -128,7 +128,7 @@
 	byte*		field;
 	ulint		len;
 	mtr_t		mtr;
-	
+
 	/* Enlarge the fatal semaphore wait timeout during the InnoDB table
 	monitor printout */
 
@@ -155,7 +155,7 @@
 
 		btr_pcur_close(&pcur);
 		mtr_commit(&mtr);
-		
+
 		mutex_exit(&(dict_sys->mutex));
 
 		/* Restore the fatal semaphore wait timeout */
@@ -165,15 +165,15 @@
 		mutex_exit(&kernel_mutex);
 
 		return;
-	}	
+	}
 
 	field = rec_get_nth_field_old(rec, 0, &len);
 
-	if (!rec_get_deleted_flag(rec, sys_tables->comp)) {
+	if (!rec_get_deleted_flag(rec, 0)) {
 
 		/* We found one */
 
-                char*	table_name = mem_strdupl((char*) field, len);
+		char*	table_name = mem_strdupl((char*) field, len);
 
 		btr_pcur_store_position(&pcur, &mtr);
 
@@ -228,14 +228,14 @@
 	ulint		space_id;
 	ulint		max_space_id	= 0;
 	mtr_t		mtr;
-	
+
 	mutex_enter(&(dict_sys->mutex));
 
 	mtr_start(&mtr);
 
 	sys_tables = dict_table_get_low("SYS_TABLES");
 	sys_index = UT_LIST_GET_FIRST(sys_tables->indexes);
-	ut_a(!sys_tables->comp);
+	ut_a(!dict_table_is_comp(sys_tables));
 
 	btr_pcur_open_at_index_side(TRUE, sys_index, BTR_SEARCH_LEAF, &pcur,
 								TRUE, &mtr);
@@ -249,40 +249,40 @@
 
 		btr_pcur_close(&pcur);
 		mtr_commit(&mtr);
-		
+
 		/* We must make the tablespace cache aware of the biggest
 		known space id */
 
 		/* printf("Biggest space id in data dictionary %lu\n",
-							    max_space_id); */
+		   max_space_id); */
 		fil_set_max_space_id_if_bigger(max_space_id);
 
 		mutex_exit(&(dict_sys->mutex));
 
 		return;
-	}	
+	}
 
 	field = rec_get_nth_field_old(rec, 0, &len);
 
-	if (!rec_get_deleted_flag(rec, sys_tables->comp)) {
+	if (!rec_get_deleted_flag(rec, 0)) {
 
 		/* We found one */
 
-                char*	name = mem_strdupl((char*) field, len);
+		char*	name = mem_strdupl((char*) field, len);
 
 		field = rec_get_nth_field_old(rec, 9, &len);
 		ut_a(len == 4);
-			
+
 		space_id = mach_read_from_4(field);
 
 		btr_pcur_store_position(&pcur, &mtr);
 
 		mtr_commit(&mtr);
-		
+
 		if (space_id != 0 && in_crash_recovery) {
 			/* Check that the tablespace (the .ibd file) really
 			exists; print a warning to the .err log if not */
-			
+
 			fil_space_for_table_exists_in_mem(space_id, name,
 							FALSE, TRUE, TRUE);
 		}
@@ -334,7 +334,7 @@
 	ulint		prec;
 	ulint		i;
 	mtr_t		mtr;
-	
+
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad(mutex_own(&(dict_sys->mutex)));
 #endif /* UNIV_SYNC_DEBUG */
@@ -343,7 +343,7 @@
 
 	sys_columns = dict_table_get_low("SYS_COLUMNS");
 	sys_index = UT_LIST_GET_FIRST(sys_columns->indexes);
-	ut_a(!sys_columns->comp);
+	ut_a(!dict_table_is_comp(sys_columns));
 
 	tuple = dtuple_create(heap, 1);
 	dfield = dtuple_get_nth_field(tuple, 0);
@@ -356,13 +356,13 @@
 
 	btr_pcur_open_on_user_rec(sys_index, tuple, PAGE_CUR_GE,
 						BTR_SEARCH_LEAF, &pcur, &mtr);
-   	for (i = 0; i < table->n_cols - DATA_N_SYS_COLS; i++) {
+	for (i = 0; i < table->n_cols - DATA_N_SYS_COLS; i++) {
 
 		rec = btr_pcur_get_rec(&pcur);
 
 		ut_a(btr_pcur_is_on_user_rec(&pcur, &mtr));
 
-		ut_a(!rec_get_deleted_flag(rec, sys_columns->comp));
+		ut_a(!rec_get_deleted_flag(rec, 0));
 
 		field = rec_get_nth_field_old(rec, 0, &len);
 		ut_ad(len == 8);
@@ -417,7 +417,7 @@
 		dict_mem_table_add_col(table, name, mtype, prtype, col_len,
 									prec);
 		btr_pcur_move_to_next_user_rec(&pcur, &mtr);
-	} 
+	}
 
 	btr_pcur_close(&pcur);
 	mtr_commit(&mtr);
@@ -465,7 +465,7 @@
 	byte*		buf;
 	ulint		i;
 	mtr_t		mtr;
-	
+
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad(mutex_own(&(dict_sys->mutex)));
 #endif /* UNIV_SYNC_DEBUG */
@@ -476,7 +476,7 @@
 
 	sys_fields = dict_table_get_low("SYS_FIELDS");
 	sys_index = UT_LIST_GET_FIRST(sys_fields->indexes);
-	ut_a(!sys_fields->comp);
+	ut_a(!dict_table_is_comp(sys_fields));
 
 	tuple = dtuple_create(heap, 1);
 	dfield = dtuple_get_nth_field(tuple, 0);
@@ -489,15 +489,15 @@
 
 	btr_pcur_open_on_user_rec(sys_index, tuple, PAGE_CUR_GE,
 						BTR_SEARCH_LEAF, &pcur, &mtr);
-   	for (i = 0; i < index->n_fields; i++) {
+	for (i = 0; i < index->n_fields; i++) {
 
 		rec = btr_pcur_get_rec(&pcur);
 
 		ut_a(btr_pcur_is_on_user_rec(&pcur, &mtr));
-		if (rec_get_deleted_flag(rec, sys_fields->comp)) {
+		if (rec_get_deleted_flag(rec, 0)) {
 			dict_load_report_deleted_index(table->name, i);
 		}
-		
+
 		field = rec_get_nth_field_old(rec, 0, &len);
 		ut_ad(len == 8);
 		ut_a(ut_memcmp(buf, field, len) == 0);
@@ -516,14 +516,14 @@
 		pos_and_prefix_len = mach_read_from_4(field);
 
 		ut_a((pos_and_prefix_len & 0xFFFFUL) == i
-		     || (pos_and_prefix_len & 0xFFFF0000UL) == (i << 16));
+			|| (pos_and_prefix_len & 0xFFFF0000UL) == (i << 16));
 
 		if ((i == 0 && pos_and_prefix_len > 0)
-		    || (pos_and_prefix_len & 0xFFFF0000UL) > 0) {
+			|| (pos_and_prefix_len & 0xFFFF0000UL) > 0) {
 
-		        prefix_len = pos_and_prefix_len & 0xFFFFUL;
+			prefix_len = pos_and_prefix_len & 0xFFFFUL;
 		} else {
-		        prefix_len = 0;
+			prefix_len = 0;
 		}
 
 		ut_a(0 == ut_strcmp("COL_NAME",
@@ -533,10 +533,10 @@
 		field = rec_get_nth_field_old(rec, 4, &len);
 
 		dict_mem_index_add_field(index,
-                                         mem_heap_strdupl(heap, (char*) field, len), 0, prefix_len);
+					 mem_heap_strdupl(heap, (char*) field, len), prefix_len);
 
 		btr_pcur_move_to_next_user_rec(&pcur, &mtr);
-	} 
+	}
 
 	btr_pcur_close(&pcur);
 	mtr_commit(&mtr);
@@ -573,23 +573,23 @@
 	ibool		is_sys_table;
 	dulint		id;
 	mtr_t		mtr;
-	
+
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad(mutex_own(&(dict_sys->mutex)));
 #endif /* UNIV_SYNC_DEBUG */
 
 	if ((ut_dulint_get_high(table->id) == 0)
-	    && (ut_dulint_get_low(table->id) < DICT_HDR_FIRST_ID)) {
+		&& (ut_dulint_get_low(table->id) < DICT_HDR_FIRST_ID)) {
 		is_sys_table = TRUE;
 	} else {
 		is_sys_table = FALSE;
 	}
-	
+
 	mtr_start(&mtr);
 
 	sys_indexes = dict_table_get_low("SYS_INDEXES");
 	sys_index = UT_LIST_GET_FIRST(sys_indexes->indexes);
-	ut_a(!sys_indexes->comp);
+	ut_a(!dict_table_is_comp(sys_indexes));
 
 	tuple = dtuple_create(heap, 1);
 	dfield = dtuple_get_nth_field(tuple, 0);
@@ -602,14 +602,14 @@
 
 	btr_pcur_open_on_user_rec(sys_index, tuple, PAGE_CUR_GE,
 						BTR_SEARCH_LEAF, &pcur, &mtr);
-   	for (;;) {
+	for (;;) {
 		if (!btr_pcur_is_on_user_rec(&pcur, &mtr)) {
 
 			break;
 		}
 
 		rec = btr_pcur_get_rec(&pcur);
-		
+
 		field = rec_get_nth_field_old(rec, 0, &len);
 		ut_ad(len == 8);
 
@@ -617,7 +617,7 @@
 			break;
 		}
 
-		if (rec_get_deleted_flag(rec, table->comp)) {
+		if (rec_get_deleted_flag(rec, dict_table_is_comp(table))) {
 			dict_load_report_deleted_index(table->name,
 				ULINT_UNDEFINED);
 
@@ -668,7 +668,7 @@
 		}
 
 		if ((type & DICT_CLUSTERED) == 0
-			    && NULL == dict_table_get_first_index(table)) {
+			&& NULL == dict_table_get_first_index(table)) {
 
 			fprintf(stderr,
 		"InnoDB: Error: trying to load index %s for table %s\n"
@@ -680,27 +680,27 @@
 
 			return(FALSE);
 		}
-		
+
 		if (is_sys_table
-		    && ((type & DICT_CLUSTERED)
-		        || ((table == dict_sys->sys_tables)
-		            && (name_len == (sizeof "ID_IND") - 1)
-			    && (0 == ut_memcmp(name_buf, "ID_IND",
+			&& ((type & DICT_CLUSTERED)
+			|| ((table == dict_sys->sys_tables)
+				&& (name_len == (sizeof "ID_IND") - 1)
+				&& (0 == ut_memcmp(name_buf, "ID_IND",
 							name_len))))) {
 
 			/* The index was created in memory already at booting
 			of the database server */
 		} else {
- 			index = dict_mem_index_create(table->name, name_buf,
+			index = dict_mem_index_create(table->name, name_buf,
 						space, type, n_fields);
 			index->id = id;
-		
+
 			dict_load_fields(table, index, heap);
 			dict_index_add_to_cache(table, index, page_no);
 		}
 
 		btr_pcur_move_to_next_user_rec(&pcur, &mtr);
-	} 
+	}
 
 	btr_pcur_close(&pcur);
 	mtr_commit(&mtr);
@@ -739,20 +739,21 @@
 	ulint		len;
 	ulint		space;
 	ulint		n_cols;
+	ulint		flags;
 	ulint		err;
 	mtr_t		mtr;
-	
+
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad(mutex_own(&(dict_sys->mutex)));
 #endif /* UNIV_SYNC_DEBUG */
 
 	heap = mem_heap_create(1000);
-	
+
 	mtr_start(&mtr);
 
 	sys_tables = dict_table_get_low("SYS_TABLES");
 	sys_index = UT_LIST_GET_FIRST(sys_tables->indexes);
-	ut_a(!sys_tables->comp);
+	ut_a(!dict_table_is_comp(sys_tables));
 
 	tuple = dtuple_create(heap, 1);
 	dfield = dtuple_get_nth_field(tuple, 0);
@@ -765,28 +766,31 @@
 	rec = btr_pcur_get_rec(&pcur);
 
 	if (!btr_pcur_is_on_user_rec(&pcur, &mtr)
-			|| rec_get_deleted_flag(rec, sys_tables->comp)) {
+			|| rec_get_deleted_flag(rec, 0)) {
 		/* Not found */
-	err_exit:
+
 		btr_pcur_close(&pcur);
 		mtr_commit(&mtr);
 		mem_heap_free(heap);
-		
+
 		return(NULL);
-	}	
+	}
 
 	field = rec_get_nth_field_old(rec, 0, &len);
 
 	/* Check if the table name in record is the searched one */
 	if (len != ut_strlen(name) || ut_memcmp(name, field, len) != 0) {
+		btr_pcur_close(&pcur);
+		mtr_commit(&mtr);
+		mem_heap_free(heap);
 
-		goto err_exit;
+		return(NULL);
 	}
 
 	ut_a(0 == ut_strcmp("SPACE",
 		dict_field_get_col(
 		dict_index_get_nth_field(sys_index, 9))->name));
-	
+
 	field = rec_get_nth_field_old(rec, 9, &len);
 	space = mach_read_from_4(field);
 
@@ -824,10 +828,15 @@
 	field = rec_get_nth_field_old(rec, 4, &len);
 	n_cols = mach_read_from_4(field);
 
+	flags = 0;
+
 	/* The high-order bit of N_COLS is the "compact format" flag. */
-	table = dict_mem_table_create(name, space,
-					n_cols & ~0x80000000UL,
-					!!(n_cols & 0x80000000UL));
+	if (n_cols & 0x80000000UL) {
+		flags |= DICT_TF_COMPACT;
+	}
+
+	table = dict_mem_table_create(name, space, n_cols & ~0x80000000UL,
+		flags);
 
 	table->ibd_file_missing = ibd_file_missing;
 
@@ -841,14 +850,6 @@
 	field = rec_get_nth_field_old(rec, 5, &len);
 	table->type = mach_read_from_4(field);
 
-	if (UNIV_UNLIKELY(table->type != DICT_TABLE_ORDINARY)) {
-		ut_print_timestamp(stderr);
-		fprintf(stderr,
-			"  InnoDB: table %s: unknown table type %lu\n",
-			name, (ulong) table->type);
-		goto err_exit;
-	}
-
 	if (table->type == DICT_TABLE_CLUSTER_MEMBER) {
 		ut_error;
 #if 0 /* clustered tables have not been implemented yet */
@@ -861,7 +862,7 @@
 	}
 
 	if ((table->type == DICT_TABLE_CLUSTER)
-	    || (table->type == DICT_TABLE_CLUSTER_MEMBER)) {
+		|| (table->type == DICT_TABLE_CLUSTER_MEMBER)) {
 
 		field = rec_get_nth_field_old(rec, 7, &len);
 		ut_a(len == 4);
@@ -880,17 +881,17 @@
 	dict_load_columns(table, heap);
 
 	dict_table_add_to_cache(table);
-	
+
 	dict_load_indexes(table, heap);
-	
+
 	err = dict_load_foreigns(table->name, TRUE);
 /*
 	if (err != DB_SUCCESS) {
-	
- 		mutex_enter(&dict_foreign_err_mutex);
 
- 		ut_print_timestamp(stderr);
- 		
+		mutex_enter(&dict_foreign_err_mutex);
+
+		ut_print_timestamp(stderr);
+
 		fprintf(stderr,
 "  InnoDB: Error: could not make a foreign key definition to match\n"
 "InnoDB: the foreign key table or the referenced table!\n"
@@ -898,7 +899,7 @@
 "InnoDB: and recreate the foreign key table or the referenced table.\n"
 "InnoDB: Submit a detailed bug report to http://bugs.mysql.com\n"
 "InnoDB: Latest foreign key error printout:\n%s\n", dict_foreign_err_buf);
-				
+
 		mutex_exit(&dict_foreign_err_mutex);
 	}
 */
@@ -914,21 +915,21 @@
 dict_load_table_on_id(
 /*==================*/
 				/* out: table; NULL if table does not exist */
-	dulint	table_id)	/* in: table id */	
+	dulint	table_id)	/* in: table id */
 {
 	byte		id_buf[8];
 	btr_pcur_t	pcur;
-	mem_heap_t* 	heap;
+	mem_heap_t*	heap;
 	dtuple_t*	tuple;
 	dfield_t*	dfield;
 	dict_index_t*	sys_table_ids;
 	dict_table_t*	sys_tables;
 	rec_t*		rec;
 	byte*		field;
-	ulint		len;	
+	ulint		len;
 	dict_table_t*	table;
 	mtr_t		mtr;
-	
+
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad(mutex_own(&(dict_sys->mutex)));
 #endif /* UNIV_SYNC_DEBUG */
@@ -937,13 +938,13 @@
 	the dictionary mutex, and therefore no deadlocks can occur
 	with other dictionary operations. */
 
-	mtr_start(&mtr);	
+	mtr_start(&mtr);
 	/*---------------------------------------------------*/
-	/* Get the secondary index based on ID for table SYS_TABLES */	
+	/* Get the secondary index based on ID for table SYS_TABLES */
 	sys_tables = dict_sys->sys_tables;
 	sys_table_ids = dict_table_get_next_index(
 				dict_table_get_first_index(sys_tables));
-	ut_a(!sys_tables->comp);
+	ut_a(!dict_table_is_comp(sys_tables));
 	heap = mem_heap_create(256);
 
 	tuple  = dtuple_create(heap, 1);
@@ -951,22 +952,22 @@
 
 	/* Write the table id in byte format to id_buf */
 	mach_write_to_8(id_buf, table_id);
-	
+
 	dfield_set_data(dfield, id_buf, 8);
 	dict_index_copy_types(tuple, sys_table_ids, 1);
 
 	btr_pcur_open_on_user_rec(sys_table_ids, tuple, PAGE_CUR_GE,
 						BTR_SEARCH_LEAF, &pcur, &mtr);
 	rec = btr_pcur_get_rec(&pcur);
-	
+
 	if (!btr_pcur_is_on_user_rec(&pcur, &mtr)
-			|| rec_get_deleted_flag(rec, sys_tables->comp)) {
+			|| rec_get_deleted_flag(rec, 0)) {
 		/* Not found */
 
 		btr_pcur_close(&pcur);
 		mtr_commit(&mtr);
 		mem_heap_free(heap);
-		
+
 		return(NULL);
 	}
 
@@ -984,15 +985,15 @@
 		btr_pcur_close(&pcur);
 		mtr_commit(&mtr);
 		mem_heap_free(heap);
-		
+
 		return(NULL);
 	}
-		
+
 	/* Now we get the table name from the record */
 	field = rec_get_nth_field_old(rec, 1, &len);
 	/* Load the table definition to memory */
 	table = dict_load_table(mem_heap_strdupl(heap, (char*) field, len));
-	
+
 	btr_pcur_close(&pcur);
 	mtr_commit(&mtr);
 	mem_heap_free(heap);
@@ -1019,7 +1020,7 @@
 	heap = mem_heap_create(1000);
 
 	dict_load_indexes(table, heap);
-	
+
 	mem_heap_free(heap);
 }
 
@@ -1043,7 +1044,7 @@
 	ulint		len;
 	ulint		i;
 	mtr_t		mtr;
-	
+
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad(mutex_own(&(dict_sys->mutex)));
 #endif /* UNIV_SYNC_DEBUG */
@@ -1057,7 +1058,7 @@
 
 	sys_foreign_cols = dict_table_get_low("SYS_FOREIGN_COLS");
 	sys_index = UT_LIST_GET_FIRST(sys_foreign_cols->indexes);
-	ut_a(!sys_foreign_cols->comp);
+	ut_a(!dict_table_is_comp(sys_foreign_cols));
 
 	tuple = dtuple_create(foreign->heap, 1);
 	dfield = dtuple_get_nth_field(tuple, 0);
@@ -1067,12 +1068,12 @@
 
 	btr_pcur_open_on_user_rec(sys_index, tuple, PAGE_CUR_GE,
 						BTR_SEARCH_LEAF, &pcur, &mtr);
-   	for (i = 0; i < foreign->n_fields; i++) {
+	for (i = 0; i < foreign->n_fields; i++) {
 
 		rec = btr_pcur_get_rec(&pcur);
 
 		ut_a(btr_pcur_is_on_user_rec(&pcur, &mtr));
-		ut_a(!rec_get_deleted_flag(rec, sys_foreign_cols->comp));
+		ut_a(!rec_get_deleted_flag(rec, 0));
 
 		field = rec_get_nth_field_old(rec, 0, &len);
 		ut_a(len == ut_strlen(id));
@@ -1084,14 +1085,14 @@
 
 		field = rec_get_nth_field_old(rec, 4, &len);
 		foreign->foreign_col_names[i] =
-                        mem_heap_strdupl(foreign->heap, (char*) field, len);
+			mem_heap_strdupl(foreign->heap, (char*) field, len);
 
 		field = rec_get_nth_field_old(rec, 5, &len);
 		foreign->referenced_col_names[i] =
-                  mem_heap_strdupl(foreign->heap, (char*) field, len);
+		  mem_heap_strdupl(foreign->heap, (char*) field, len);
 
 		btr_pcur_move_to_next_user_rec(&pcur, &mtr);
-	} 
+	}
 
 	btr_pcur_close(&pcur);
 	mtr_commit(&mtr);
@@ -1107,7 +1108,7 @@
 	const char*	id,	/* in: foreign constraint id as a
 				null-terminated string */
 	ibool		check_charsets)/* in: TRUE=check charset compatibility */
-{	
+{
 	dict_foreign_t*	foreign;
 	dict_table_t*	sys_foreign;
 	btr_pcur_t	pcur;
@@ -1119,18 +1120,18 @@
 	byte*		field;
 	ulint		len;
 	mtr_t		mtr;
-	
+
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad(mutex_own(&(dict_sys->mutex)));
 #endif /* UNIV_SYNC_DEBUG */
 
 	heap2 = mem_heap_create(1000);
-	
+
 	mtr_start(&mtr);
 
 	sys_foreign = dict_table_get_low("SYS_FOREIGN");
 	sys_index = UT_LIST_GET_FIRST(sys_foreign->indexes);
-	ut_a(!sys_foreign->comp);
+	ut_a(!dict_table_is_comp(sys_foreign));
 
 	tuple = dtuple_create(heap2, 1);
 	dfield = dtuple_get_nth_field(tuple, 0);
@@ -1143,7 +1144,7 @@
 	rec = btr_pcur_get_rec(&pcur);
 
 	if (!btr_pcur_is_on_user_rec(&pcur, &mtr)
-			|| rec_get_deleted_flag(rec, sys_foreign->comp)) {
+			|| rec_get_deleted_flag(rec, 0)) {
 		/* Not found */
 
 		fprintf(stderr,
@@ -1153,9 +1154,9 @@
 		btr_pcur_close(&pcur);
 		mtr_commit(&mtr);
 		mem_heap_free(heap2);
-		
+
 		return(DB_ERROR);
-	}	
+	}
 
 	field = rec_get_nth_field_old(rec, 0, &len);
 
@@ -1169,7 +1170,7 @@
 		btr_pcur_close(&pcur);
 		mtr_commit(&mtr);
 		mem_heap_free(heap2);
-		
+
 		return(DB_ERROR);
 	}
 
@@ -1177,7 +1178,7 @@
 	with the constraint */
 
 	mem_heap_free(heap2);
-	
+
 	foreign = dict_mem_foreign_create();
 
 	foreign->n_fields =
@@ -1186,19 +1187,19 @@
 	ut_a(len == 4);
 
 	/* We store the type to the bits 24-31 of n_fields */
-	
+
 	foreign->type = foreign->n_fields >> 24;
 	foreign->n_fields = foreign->n_fields & 0xFFFFFFUL;
-	
+
 	foreign->id = mem_heap_strdup(foreign->heap, id);
 
 	field = rec_get_nth_field_old(rec, 3, &len);
 	foreign->foreign_table_name =
-                mem_heap_strdupl(foreign->heap, (char*) field, len);
+		mem_heap_strdupl(foreign->heap, (char*) field, len);
 
 	field = rec_get_nth_field_old(rec, 4, &len);
 	foreign->referenced_table_name =
-                mem_heap_strdupl(foreign->heap, (char*) field, len);
+		mem_heap_strdupl(foreign->heap, (char*) field, len);
 
 	btr_pcur_close(&pcur);
 	mtr_commit(&mtr);
@@ -1238,18 +1239,18 @@
 					compatibility */
 {
 	btr_pcur_t	pcur;
-	mem_heap_t* 	heap;
+	mem_heap_t*	heap;
 	dtuple_t*	tuple;
 	dfield_t*	dfield;
 	dict_index_t*	sec_index;
 	dict_table_t*	sys_foreign;
 	rec_t*		rec;
 	byte*		field;
-	ulint		len;	
+	ulint		len;
 	char*		id ;
 	ulint		err;
 	mtr_t		mtr;
-	
+
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad(mutex_own(&(dict_sys->mutex)));
 #endif /* UNIV_SYNC_DEBUG */
@@ -1261,15 +1262,15 @@
 
 		fprintf(stderr,
 	"InnoDB: Error: no foreign key system tables in the database\n");
-		
+
 		return(DB_ERROR);
 	}
 
-	ut_a(!sys_foreign->comp);
-	mtr_start(&mtr);	
+	ut_a(!dict_table_is_comp(sys_foreign));
+	mtr_start(&mtr);
 
 	/* Get the secondary index based on FOR_NAME from table
-	SYS_FOREIGN */	
+	SYS_FOREIGN */
 
 	sec_index = dict_table_get_next_index(
 				dict_table_get_first_index(sys_foreign));
@@ -1286,7 +1287,7 @@
 						BTR_SEARCH_LEAF, &pcur, &mtr);
 loop:
 	rec = btr_pcur_get_rec(&pcur);
-	
+
 	if (!btr_pcur_is_on_user_rec(&pcur, &mtr)) {
 		/* End of index */
 
@@ -1306,7 +1307,7 @@
 	if (0 != cmp_data_data(dfield_get_type(dfield),
 			dfield_get_data(dfield), dfield_get_len(dfield),
 			field, len)) {
-		
+
 		goto load_next_index;
 	}
 
@@ -1319,8 +1320,8 @@
 
 		goto next_rec;
 	}
-		
-	if (rec_get_deleted_flag(rec, sys_foreign->comp)) {
+
+	if (rec_get_deleted_flag(rec, 0)) {
 
 		goto next_rec;
 	}
@@ -1328,13 +1329,13 @@
 	/* Now we get a foreign key constraint id */
 	field = rec_get_nth_field_old(rec, 1, &len);
 	id = mem_heap_strdupl(heap, (char*) field, len);
-	
+
 	btr_pcur_store_position(&pcur, &mtr);
 
 	mtr_commit(&mtr);
 
 	/* Load the foreign constraint definition to the dictionary cache */
-	
+
 	err = dict_load_foreign(id, check_charsets);
 
 	if (err != DB_SUCCESS) {
@@ -1356,12 +1357,12 @@
 	btr_pcur_close(&pcur);
 	mtr_commit(&mtr);
 	mem_heap_free(heap);
-	
+
 	sec_index = dict_table_get_next_index(sec_index);
 
 	if (sec_index != NULL) {
 
-		mtr_start(&mtr);	
+		mtr_start(&mtr);
 
 		goto start_load;
 	}

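The dict0load.c hunks above do two things: direct reads of the removed table->comp field are
replaced by the dict_table_is_comp() accessor, and dict_load_table() now decodes the high-order
bit of SYS_TABLES.N_COLS into a table flags word instead of a boolean. A minimal sketch of that
decoding, assuming only the DICT_TF_COMPACT flag introduced in the dict0mem.h hunk further down
(decode_n_cols() itself is a hypothetical helper, not part of the patch):

	static void
	decode_n_cols(
	/*==========*/
		ulint	n_cols_field,	/* in: N_COLS column of SYS_TABLES */
		ulint*	n_cols,		/* out: number of user columns */
		ulint*	flags)		/* out: table flags */
	{
		*flags = 0;

		/* The high-order bit of N_COLS is the "compact format" flag. */
		if (n_cols_field & 0x80000000UL) {
			*flags |= DICT_TF_COMPACT;
		}

		*n_cols = n_cols_field & ~0x80000000UL;
	}
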
--- 1.16.2.2/innobase/dict/dict0mem.c	2006-04-21 01:07:32 +04:00
+++ 1.20/storage/innobase/dict/dict0mem.c	2006-04-21 02:03:25 +04:00
@@ -36,13 +36,13 @@
 				ignored if the table is made a member of
 				a cluster */
 	ulint		n_cols,	/* in: number of columns */
-	ibool		comp)	/* in: TRUE=compact page format */
+	ulint		flags)	/* in: table flags */
 {
 	dict_table_t*	table;
 	mem_heap_t*	heap;
-	
+
 	ut_ad(name);
-	ut_ad(comp == FALSE || comp == TRUE);
+	ut_ad(!(flags & ~DICT_TF_COMPACT));
 
 	heap = mem_heap_create(DICT_HEAP_SIZE);
 
@@ -51,24 +51,24 @@
 	table->heap = heap;
 
 	table->type = DICT_TABLE_ORDINARY;
+	table->flags = flags;
 	table->name = mem_heap_strdup(heap, name);
 	table->dir_path_of_temp_table = NULL;
 	table->space = space;
 	table->ibd_file_missing = FALSE;
 	table->tablespace_discarded = FALSE;
-	table->comp = comp;
 	table->n_def = 0;
 	table->n_cols = n_cols + DATA_N_SYS_COLS;
 	table->mem_fix = 0;
 
 	table->n_mysql_handles_opened = 0;
 	table->n_foreign_key_checks_running = 0;
-		
+
 	table->cached = FALSE;
-	
+
 	table->mix_id = ut_dulint_zero;
 	table->mix_len = 0;
-	
+
 	table->cols = mem_heap_alloc(heap, (n_cols + DATA_N_SYS_COLS)
 							* sizeof(dict_col_t));
 	UT_LIST_INIT(table->indexes);
@@ -86,30 +86,15 @@
 	table->stat_initialized = FALSE;
 
 	table->stat_modified_counter = 0;
-	
+
 	mutex_create(&(table->autoinc_mutex));
 	mutex_set_level(&(table->autoinc_mutex), SYNC_DICT_AUTOINC_MUTEX);
 
 	table->autoinc_inited = FALSE;
 
 	table->magic_n = DICT_TABLE_MAGIC_N;
-	
-	return(table);
-}
-
-/********************************************************************
-Free a table memory object. */
-
-void
-dict_mem_table_free(
-/*================*/
-	dict_table_t*	table)		/* in: table */
-{
-	ut_ad(table);
-	ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
 
-	mutex_free(&(table->autoinc_mutex));
-	mem_heap_free(table->heap);
+	return(table);
 }
 
 /**************************************************************************
@@ -129,7 +114,7 @@
 	dict_table_t*		cluster;
 
 	/* Clustered tables cannot work with the compact record format. */
-	cluster = dict_mem_table_create(name, space, n_cols, FALSE);
+	cluster = dict_mem_table_create(name, space, n_cols, 0);
 
 	cluster->type = DICT_TABLE_CLUSTER;
 	cluster->mix_len = mix_len;
@@ -165,13 +150,13 @@
 {
 	dict_col_t*	col;
 	dtype_t*	type;
-	
+
 	ut_ad(table && name);
 	ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
-	
+
 	table->n_def++;
 
-	col = dict_table_get_nth_col(table, table->n_def - 1);	
+	col = dict_table_get_nth_col(table, table->n_def - 1);
 
 	col->ind = table->n_def - 1;
 	col->name = mem_heap_strdup(table->heap, name);
@@ -179,7 +164,7 @@
 	col->ord_part = 0;
 
 	col->clust_pos = ULINT_UNDEFINED;
-	
+
 	type = dict_col_get_type(col);
 
 	dtype_set(type, mtype, prtype, len, prec);
@@ -203,14 +188,14 @@
 {
 	dict_index_t*	index;
 	mem_heap_t*	heap;
-	
+
 	ut_ad(table_name && index_name);
 
 	heap = mem_heap_create(DICT_HEAP_SIZE);
 	index = mem_heap_alloc(heap, sizeof(dict_index_t));
 
 	index->heap = heap;
-	
+
 	index->type = type;
 	index->space = space;
 	index->name = mem_heap_strdup(heap, index_name);
@@ -276,24 +261,20 @@
 /*=====================*/
 	dict_index_t*	index,		/* in: index */
 	const char*	name,		/* in: column name */
-	ulint		order,		/* in: order criterion; 0 means an
-					ascending order */
 	ulint		prefix_len)	/* in: 0 or the column prefix length
 					in a MySQL index like
 					INDEX (textcol(25)) */
 {
 	dict_field_t*	field;
-	
+
 	ut_ad(index && name);
 	ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
-	
+
 	index->n_def++;
 
-	field = dict_index_get_nth_field(index, index->n_def - 1);	
+	field = dict_index_get_nth_field(index, index->n_def - 1);
 
 	field->name = name;
-	field->order = order;
-
 	field->prefix_len = prefix_len;
 }
 
@@ -305,8 +286,5 @@
 /*================*/
 	dict_index_t*	index)	/* in: index */
 {
-	ut_ad(index);
-	ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
-
 	mem_heap_free(index->heap);
 }

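After the dict0mem.c changes, dict_mem_table_create() takes a ulint flags argument (0 or
DICT_TF_COMPACT) instead of an ibool comp, and dict_mem_index_add_field() loses its order
argument. A hedged usage sketch against the new signatures; the table and column names below
are invented for illustration only:

	dict_table_t*	table;
	dict_index_t*	index;

	/* Previously: dict_mem_table_create("test/t1", 0, 1, TRUE) */
	table = dict_mem_table_create("test/t1", 0, 1, DICT_TF_COMPACT);

	dict_mem_table_add_col(table, "A", DATA_BINARY, 0, 0, 0);

	index = dict_mem_index_create("test/t1", "A_IND", 0, DICT_UNIQUE, 1);

	/* Previously: dict_mem_index_add_field(index, "A", 0, 0) */
	dict_mem_index_add_field(index, "A", 0);
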
--- 1.36.3.1/innobase/ibuf/ibuf0ibuf.c	2006-04-21 01:07:32 +04:00
+++ 1.41/storage/innobase/ibuf/ibuf0ibuf.c	2006-04-21 02:03:25 +04:00
@@ -29,7 +29,7 @@
 #include "log0recv.h"
 #include "que0que.h"
 
-/*      STRUCTURE OF AN INSERT BUFFER RECORD
+/*	STRUCTURE OF AN INSERT BUFFER RECORD
 
 In versions < 4.1.x:
 
@@ -140,8 +140,7 @@
 /* The insert buffer control structure */
 ibuf_t*	ibuf			= NULL;
 
-static
-ulint	ibuf_rnd		= 986058871;
+static ulint ibuf_rnd		= 986058871;
 
 ulint	ibuf_flush_count	= 0;
 
@@ -369,14 +368,14 @@
 	/* Note that also a pessimistic delete can sometimes make a B-tree
 	grow in size, as the references on the upper levels of the tree can
 	change */
-	
+
 	ibuf->max_size = buf_pool_get_curr_size() / UNIV_PAGE_SIZE
 						/ IBUF_POOL_SIZE_PER_MAX_SIZE;
 	ibuf->meter = IBUF_THRESHOLD + 1;
 
 	UT_LIST_INIT(ibuf->data_list);
 
-	ibuf->size = 0;					
+	ibuf->size = 0;
 
 #ifdef UNIV_IBUF_DEBUG
 	{
@@ -390,7 +389,7 @@
 				ibuf_count_set(i, j, 0);
 			}
 		}
-	}	
+	}
 #endif
 	mutex_create(&ibuf_pessimistic_insert_mutex);
 
@@ -427,7 +426,7 @@
 #endif /* UNIV_SYNC_DEBUG */
 
 	old_size = data->size;
-	
+
 	data->free_list_len = flst_get_len(root + PAGE_HEADER
 					   + PAGE_BTR_IBUF_FREE_LIST, mtr);
 
@@ -473,7 +472,7 @@
 	dict_table_t*	table;
 	dict_index_t*	index;
 	ulint		n_used;
-	
+
 	ut_a(space == 0);
 
 #ifdef UNIV_LOG_DEBUG
@@ -499,11 +498,11 @@
 	fseg_n_reserved_pages(header_page + IBUF_HEADER + IBUF_TREE_SEG_HEADER,
 								&n_used, &mtr);
 	ibuf_enter();
-	
+
 	ut_ad(n_used >= 2);
 
 	data->seg_size = n_used;
-	
+
 	root = buf_page_get(space, FSP_IBUF_TREE_ROOT_PAGE_NO, RW_X_LATCH,
 								&mtr);
 #ifdef UNIV_SYNC_DEBUG
@@ -514,7 +513,7 @@
 	data->n_inserts = 0;
 	data->n_merges = 0;
 	data->n_merged_recs = 0;
-	
+
 	ibuf_data_sizes_update(data, root, &mtr);
 /*
 	if (!data->empty) {
@@ -533,7 +532,7 @@
 
 	sprintf(buf, "SYS_IBUF_TABLE_%lu", (ulong) space);
 	/* use old-style record format for the insert buffer */
-	table = dict_mem_table_create(buf, space, 2, FALSE);
+	table = dict_mem_table_create(buf, space, 2, 0);
 
 	dict_mem_table_add_col(table, "PAGE_NO", DATA_BINARY, 0, 0, 0);
 	dict_mem_table_add_col(table, "TYPES", DATA_BINARY, 0, 0, 0);
@@ -545,8 +544,8 @@
 	index = dict_mem_index_create(buf, "CLUST_IND", space,
 				DICT_CLUSTERED | DICT_UNIVERSAL | DICT_IBUF,2);
 
-	dict_mem_index_add_field(index, "PAGE_NO", 0, 0);
-	dict_mem_index_add_field(index, "TYPES", 0, 0);
+	dict_mem_index_add_field(index, "PAGE_NO", 0);
+	dict_mem_index_add_field(index, "TYPES", 0);
 
 	index->id = ut_dulint_add(DICT_IBUF_ID_MIN, space);
 
@@ -574,18 +573,18 @@
 {
 	ulint	bit_offset;
 	ulint	byte_offset;
-	ulint	i;
 
 	/* Write all zeros to the bitmap */
 
 	bit_offset = XDES_DESCRIBED_PER_PAGE * IBUF_BITS_PER_PAGE;
 
-	byte_offset = bit_offset / 8 + 1;
+	byte_offset = bit_offset / 8 + 1; /* better: (bit_offset + 7) / 8 */
 
-	for (i = IBUF_BITMAP; i < IBUF_BITMAP + byte_offset; i++) {
+	fil_page_set_type(page, FIL_PAGE_IBUF_BITMAP);
 
-		*(page + i) = (byte)0;
-	}
+	memset(page + IBUF_BITMAP, 0, byte_offset);
+
+	/* The remaining area (up to the page trailer) is uninitialized. */
 
 	mlog_write_initial_log_record(page, MLOG_IBUF_BITMAP_INIT, mtr);
 }
@@ -621,8 +620,9 @@
 	page_t*	page,	/* in: bitmap page */
 	ulint	page_no,/* in: page whose bits to get */
 	ulint	bit,	/* in: IBUF_BITMAP_FREE, IBUF_BITMAP_BUFFERED, ... */
-	mtr_t*	mtr __attribute__((unused))) /* in: mtr containing an x-latch
-                                               to the bitmap page */
+	mtr_t*	mtr __attribute__((unused)))	/* in: mtr containing an
+						x-latch to the bitmap
+						page */
 {
 	ulint	byte_offset;
 	ulint	bit_offset;
@@ -630,12 +630,14 @@
 	ulint	value;
 
 	ut_ad(bit < IBUF_BITS_PER_PAGE);
-	ut_ad(IBUF_BITS_PER_PAGE % 2 == 0);
+#if IBUF_BITS_PER_PAGE % 2
+# error "IBUF_BITS_PER_PAGE % 2 != 0"
+#endif
 	ut_ad(mtr_memo_contains(mtr, buf_block_align(page),
 						MTR_MEMO_PAGE_X_FIX));
 
 	bit_offset = (page_no % XDES_DESCRIBED_PER_PAGE) * IBUF_BITS_PER_PAGE
-		     + bit;
+		+ bit;
 
 	byte_offset = bit_offset / 8;
 	bit_offset = bit_offset % 8;
@@ -648,7 +650,7 @@
 
 	if (bit == IBUF_BITMAP_FREE) {
 		ut_ad(bit_offset + 1 < 8);
-		
+
 		value = value * 2 + ut_bit_get_nth(map_byte, bit_offset + 1);
 	}
 
@@ -672,15 +674,18 @@
 	ulint	map_byte;
 
 	ut_ad(bit < IBUF_BITS_PER_PAGE);
-	ut_ad(IBUF_BITS_PER_PAGE % 2 == 0);
+#if IBUF_BITS_PER_PAGE % 2
+# error "IBUF_BITS_PER_PAGE % 2 != 0"
+#endif
 	ut_ad(mtr_memo_contains(mtr, buf_block_align(page),
 						MTR_MEMO_PAGE_X_FIX));
 #ifdef UNIV_IBUF_DEBUG
 	ut_a((bit != IBUF_BITMAP_BUFFERED) || (val != FALSE)
-	      || (0 == ibuf_count_get(buf_frame_get_space_id(page), page_no)));
+		|| (0 == ibuf_count_get(buf_frame_get_space_id(page),
+				page_no)));
 #endif
 	bit_offset = (page_no % XDES_DESCRIBED_PER_PAGE) * IBUF_BITS_PER_PAGE
-		     + bit;
+		+ bit;
 
 	byte_offset = bit_offset / 8;
 	bit_offset = bit_offset % 8;
@@ -692,14 +697,14 @@
 	if (bit == IBUF_BITMAP_FREE) {
 		ut_ad(bit_offset + 1 < 8);
 		ut_ad(val <= 3);
-		
+
 		map_byte = ut_bit_set_nth(map_byte, bit_offset, val / 2);
 		map_byte = ut_bit_set_nth(map_byte, bit_offset + 1, val % 2);
 	} else {
 		ut_ad(val <= 1);
 		map_byte = ut_bit_set_nth(map_byte, bit_offset, val);
 	}
-	
+
 	mlog_write_ulint(page + IBUF_BITMAP + byte_offset, map_byte,
 							MLOG_1BYTE, mtr);
 }
@@ -715,8 +720,8 @@
 	ulint	page_no)	/* in: tablespace page number */
 {
 	return(FSP_IBUF_BITMAP_OFFSET
-	       + XDES_DESCRIBED_PER_PAGE
-					* (page_no / XDES_DESCRIBED_PER_PAGE));
+		+ XDES_DESCRIBED_PER_PAGE
+		* (page_no / XDES_DESCRIBED_PER_PAGE));
 }
 
 /************************************************************************
@@ -779,7 +784,7 @@
 		"Setting page no %lu free bits to %lu should be %lu\n",
 					buf_frame_get_page_no(page), val,
 				ibuf_index_page_calc_free(page)); */
-	
+
 	ut_a(val <= ibuf_index_page_calc_free(page));
 #endif
 	ibuf_bitmap_page_set_bits(bitmap_page, buf_frame_get_page_no(page),
@@ -818,7 +823,7 @@
 	}
 
 	mtr_start(&mtr);
-	
+
 	bitmap_page = ibuf_bitmap_get_map_page(buf_frame_get_space_id(page),
 					buf_frame_get_page_no(page), &mtr);
 
@@ -844,7 +849,7 @@
 				ibuf_index_page_calc_free(page)); */
 
 	ut_a(val <= ibuf_index_page_calc_free(page));
-#endif				
+#endif
 	ibuf_bitmap_page_set_bits(bitmap_page, buf_frame_get_page_no(page),
 						IBUF_BITMAP_FREE, val, &mtr);
 	mtr_commit(&mtr);
@@ -931,7 +936,7 @@
 	performed by another OS thread. */
 
 	mutex_enter(&ibuf_bitmap_mutex);
-	
+
 	state = ibuf_index_page_calc_free(page1);
 
 	ibuf_set_free_bits_low(index->type, page1, state, mtr);
@@ -1028,7 +1033,7 @@
 
 		return(FALSE);
 	}
-#endif	
+#endif
 	if (ibuf_fixed_addr_page(page_no)) {
 
 		return(TRUE);
@@ -1122,13 +1127,18 @@
 {
 	dict_table_t*	table;
 	dict_index_t*	index;
+
 	table = dict_mem_table_create("IBUF_DUMMY",
-			DICT_HDR_SPACE, n, comp);
+		DICT_HDR_SPACE, n, comp ? DICT_TF_COMPACT : 0);
+
 	index = dict_mem_index_create("IBUF_DUMMY", "IBUF_DUMMY",
-			DICT_HDR_SPACE, 0, n);
+		DICT_HDR_SPACE, 0, n);
+
 	index->table = table;
+
 	/* avoid ut_ad(index->cached) in dict_index_get_n_unique_in_tree */
 	index->cached = TRUE;
+
 	return(index);
 }
 /************************************************************************
@@ -1136,7 +1146,7 @@
 static
 void
 ibuf_dummy_index_add_col(
-/*====================*/
+/*=====================*/
 	dict_index_t*	index,	/* in: dummy index */
 	dtype_t*	type,	/* in: the data type of the column */
 	ulint		len)	/* in: length of the column */
@@ -1148,7 +1158,7 @@
 		dtype_get_len(type),
 		dtype_get_prec(type));
 	dict_index_add_col(index,
-		dict_table_get_nth_col(index->table, i), 0, len);
+		dict_table_get_nth_col(index->table, i), len);
 }
 /************************************************************************
 Deallocates a dummy index for inserting a record to a non-clustered index.
@@ -1156,13 +1166,13 @@
 static
 void
 ibuf_dummy_index_free(
-/*====================*/
+/*==================*/
 	dict_index_t*	index)	/* in: dummy index */
 {
 	dict_table_t*	table = index->table;
-
-	dict_mem_index_free(index);
-	dict_mem_table_free(table);
+	mem_heap_free(index->heap);
+	mutex_free(&(table->autoinc_mutex));
+	mem_heap_free(table->heap);
 }
 
 /*************************************************************************
@@ -1378,7 +1388,7 @@
 	byte*		buf;
 	byte*		buf2;
 	ulint		i;
-	
+
 	/* Starting from 4.1.x, we have to build a tuple whose
 	(1) first field is the space id,
 	(2) the second field a single marker byte (0) to tell that this
@@ -1467,7 +1477,7 @@
 	dtuple_set_types_binary(tuple, n_fields + 4);
 
 	return(tuple);
-}	
+}
 
 /*************************************************************************
 Builds a search tuple used to search buffered inserts for an index page.
@@ -1484,7 +1494,7 @@
 	dtuple_t*	tuple;
 	dfield_t*	field;
 	byte*		buf;
-	
+
 	ut_a(space == 0);
 	ut_a(trx_doublewrite_must_reset_space_ids);
 	ut_a(!trx_sys_multiple_tablespace_format);
@@ -1521,7 +1531,7 @@
 	dtuple_t*	tuple;
 	dfield_t*	field;
 	byte*		buf;
-	
+
 	ut_a(trx_sys_multiple_tablespace_format);
 
 	tuple = dtuple_create(heap, 3);
@@ -1637,7 +1647,7 @@
 	/* Acquire the fsp latch before the ibuf header, obeying the latching
 	order */
 	mtr_x_lock(fil_space_get_latch(space), &mtr);
-	
+
 	header_page = ibuf_header_page_get(space, &mtr);
 
 	/* Allocate a new page: NOTE that if the page has been a part of a
@@ -1674,9 +1684,10 @@
 	/* Add the page to the free list and update the ibuf size data */
 
 	flst_add_last(root + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
-		      page + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST_NODE, &mtr);
+		page + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST_NODE, &mtr);
 
-	fil_page_set_type(page, FIL_PAGE_IBUF_FREE_LIST);
+	mlog_write_ulint(page + FIL_PAGE_TYPE, FIL_PAGE_IBUF_FREE_LIST,
+						MLOG_2BYTES, &mtr);
 
 	ibuf_data->seg_size++;
 	ibuf_data->free_list_len++;
@@ -1721,7 +1732,7 @@
 	/* Acquire the fsp latch before the ibuf header, obeying the latching
 	order */
 	mtr_x_lock(fil_space_get_latch(space), &mtr);
-	
+
 	header_page = ibuf_header_page_get(space, &mtr);
 
 	/* Prevent pessimistic inserts to insert buffer trees for a while */
@@ -1736,16 +1747,16 @@
 		mutex_exit(&ibuf_mutex);
 
 		ibuf_exit();
-		
+
 		mutex_exit(&ibuf_pessimistic_insert_mutex);
 
 		mtr_commit(&mtr);
 
 		return;
 	}
-	
+
 	mtr_start(&mtr2);
-	
+
 	root = ibuf_tree_root_get(ibuf_data, space, &mtr2);
 
 	page_no = flst_get_last(root + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
@@ -1755,32 +1766,32 @@
 	/* NOTE that we must release the latch on the ibuf tree root
 	because in fseg_free_page we access level 1 pages, and the root
 	is a level 2 page. */
-		  
+
 	mtr_commit(&mtr2);
 	mutex_exit(&ibuf_mutex);
 
 	ibuf_exit();
-	
+
 	/* Since pessimistic inserts were prevented, we know that the
 	page is still in the free list. NOTE that also deletes may take
 	pages from the free list, but they take them from the start, and
 	the free list was so long that they cannot have taken the last
 	page from it. */
-	
+
 	fseg_free_page(header_page + IBUF_HEADER + IBUF_TREE_SEG_HEADER,
 							space, page_no, &mtr);
 #ifdef UNIV_DEBUG_FILE_ACCESSES
 	buf_page_reset_file_page_was_freed(space, page_no);
 #endif
 	ibuf_enter();
-							
+
 	mutex_enter(&ibuf_mutex);
 
 	root = ibuf_tree_root_get(ibuf_data, space, &mtr);
 
 	ut_ad(page_no == flst_get_last(root + PAGE_HEADER
 					+ PAGE_BTR_IBUF_FREE_LIST, &mtr)
-		  	 .page);
+			 .page);
 
 	page = buf_page_get(space, page_no, RW_X_LATCH, &mtr);
 
@@ -1789,13 +1800,13 @@
 #endif /* UNIV_SYNC_DEBUG */
 
 	/* Remove the page from the free list and update the ibuf size data */
-	
+
 	flst_remove(root + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
-		    page + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST_NODE, &mtr);
+		page + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST_NODE, &mtr);
 
 	ibuf_data->seg_size--;
 	ibuf_data->free_list_len--;
-		      
+
 	mutex_exit(&ibuf_pessimistic_insert_mutex);
 
 	/* Set the bit indicating that this page is no more an ibuf tree page
@@ -1818,7 +1829,7 @@
 /***************************************************************************
 Frees excess pages from the ibuf free list. This function is called when an OS
 thread calls fsp services to allocate a new file segment, or a new page to a
-file segment, and the thread did not own the fsp latch before this call. */ 
+file segment, and the thread did not own the fsp latch before this call. */
 
 void
 ibuf_free_excess_pages(
@@ -1829,7 +1840,7 @@
 	ulint		i;
 
 	if (space != 0) {
-	        fprintf(stderr,
+		fprintf(stderr,
 "InnoDB: Error: calling ibuf_free_excess_pages for space %lu\n", (ulong) space);
 		return;
 	}
@@ -1839,7 +1850,7 @@
 #endif /* UNIV_SYNC_DEBUG */
 	ut_ad(rw_lock_get_x_lock_count(fil_space_get_latch(space)) == 1);
 	ut_ad(!ibuf_inside());
-	
+
 	/* NOTE: We require that the thread did not own the latch before,
 	because then we know that we can obey the correct latching order
 	for ibuf latches */
@@ -1937,7 +1948,7 @@
 	n_pages = 0;
 	prev_page_no = 0;
 	prev_space_id = 0;
-	
+
 	/* Go backwards from the first rec until we reach the border of the
 	'merge area', or the page start or the limit of storeable pages is
 	reached */
@@ -1948,14 +1959,14 @@
 		rec_space_id = ibuf_rec_get_space(rec);
 
 		if (rec_space_id != first_space_id
-		    || rec_page_no / IBUF_MERGE_AREA
-		       != first_page_no / IBUF_MERGE_AREA) {
+			|| rec_page_no / IBUF_MERGE_AREA
+			!= first_page_no / IBUF_MERGE_AREA) {
 
-		    	break;
+			break;
 		}
-		
+
 		if (rec_page_no != prev_page_no
-		    || rec_space_id != prev_space_id) {
+			|| rec_space_id != prev_space_id) {
 			n_pages++;
 		}
 
@@ -1975,7 +1986,7 @@
 	prev_space_id = 0;
 	sum_volumes = 0;
 	volume_for_page = 0;
-	
+
 	while (*n_stored < limit) {
 		if (page_rec_is_supremum(rec)) {
 			/* When no more records available, mark this with
@@ -1992,19 +2003,19 @@
 		ut_a(*n_stored < IBUF_MAX_N_PAGES_MERGED);
 #endif
 		if ((rec_space_id != prev_space_id
-		     || rec_page_no != prev_page_no)
-                    && (prev_space_id != 0 || prev_page_no != 0)) {
+				|| rec_page_no != prev_page_no)
+			&& (prev_space_id != 0 || prev_page_no != 0)) {
 
 			if ((prev_page_no == first_page_no
-			     && prev_space_id == first_space_id)
-			    || contract
-			    || (volume_for_page >
-			     ((IBUF_MERGE_THRESHOLD - 1)
-			      * 4 * UNIV_PAGE_SIZE
-				    / IBUF_PAGE_SIZE_PER_FREE_SPACE)
-			     / IBUF_MERGE_THRESHOLD)) {
+					&& prev_space_id == first_space_id)
+				|| contract
+				|| (volume_for_page >
+					((IBUF_MERGE_THRESHOLD - 1)
+						* 4 * UNIV_PAGE_SIZE
+						/ IBUF_PAGE_SIZE_PER_FREE_SPACE)
+					/ IBUF_MERGE_THRESHOLD)) {
 
-			        space_ids[*n_stored] = prev_space_id;
+				space_ids[*n_stored] = prev_space_id;
 				space_versions[*n_stored]
 						= fil_space_get_version(
 							prev_space_id);
@@ -2016,10 +2027,10 @@
 			}
 
 			if (rec_space_id != first_space_id
-			    || rec_page_no / IBUF_MERGE_AREA
-			       != first_page_no / IBUF_MERGE_AREA) {
+				|| rec_page_no / IBUF_MERGE_AREA
+				!= first_page_no / IBUF_MERGE_AREA) {
 
-		    		break;
+				break;
 			}
 
 			volume_for_page = 0;
@@ -2034,7 +2045,7 @@
 		rec_volume = ibuf_rec_get_volume(rec);
 
 		volume_for_page += rec_volume;
-		
+
 		prev_page_no = rec_page_no;
 		prev_space_id = rec_space_id;
 
@@ -2081,7 +2092,7 @@
 
 	mutex_enter(&ibuf_mutex);
 
-	ut_ad(ibuf_validate_low());	
+	ut_ad(ibuf_validate_low());
 
 	/* Choose an ibuf tree at random (though there really is only one tree
 	in the current implementation) */
@@ -2096,15 +2107,15 @@
 	for (;;) {
 		if (!data->empty) {
 			all_trees_empty = FALSE;
-		
+
 			if (rnd_pos < data->size) {
 
 				break;
 			}
-		
+
 			rnd_pos -= data->size;
 		}
-			
+
 		data = UT_LIST_GET_NEXT(data_list, data);
 
 		if (data == NULL) {
@@ -2113,7 +2124,7 @@
 
 				return(0);
 			}
-			
+
 			data = UT_LIST_GET_FIRST(ibuf->data_list);
 		}
 	}
@@ -2127,7 +2138,7 @@
 	mtr_start(&mtr);
 
 	ibuf_enter();
-	
+
 	/* Open a cursor to a randomly chosen leaf of the tree, at a random
 	position within the leaf */
 
@@ -2136,19 +2147,19 @@
 	if (0 == page_get_n_recs(btr_pcur_get_page(&pcur))) {
 
 		/* This tree is empty */
-	    
-	    	data->empty = TRUE;
 
-	    	ibuf_exit();
+		data->empty = TRUE;
 
-	    	mtr_commit(&mtr);
-	    	btr_pcur_close(&pcur);
+		ibuf_exit();
 
-	    	mutex_exit(&ibuf_mutex);
+		mtr_commit(&mtr);
+		btr_pcur_close(&pcur);
+
+		mutex_exit(&ibuf_mutex);
 
-	    	goto loop;
+		goto loop;
 	}
-	
+
 	mutex_exit(&ibuf_mutex);
 
 	sum_sizes = ibuf_get_merge_page_nos(TRUE, btr_pcur_get_rec(&pcur),
@@ -2165,7 +2176,7 @@
 	buf_read_ibuf_merge_pages(sync, space_ids, space_versions, page_nos,
 								   n_stored);
 	*n_pages = n_stored;
-	
+
 	return(sum_sizes + 1);
 }
 
@@ -2204,13 +2215,13 @@
 			them */
 {
 	ulint	sum_bytes	= 0;
-	ulint	sum_pages 	= 0;
+	ulint	sum_pages	= 0;
 	ulint	n_bytes;
 	ulint	n_pag2;
-	
+
 	while (sum_pages < n_pages) {
 		n_bytes = ibuf_contract_ext(&n_pag2, sync);
-		
+
 		if (n_bytes == 0) {
 			return(sum_bytes);
 		}
@@ -2244,7 +2255,7 @@
 	}
 
 	sync = FALSE;
-	
+
 	if (ibuf->size >= ibuf->max_size + IBUF_CONTRACT_ON_INSERT_SYNC) {
 
 		sync = TRUE;
@@ -2291,7 +2302,7 @@
 	page_t*	prev_page;
 	ulint	next_page_no;
 	page_t*	next_page;
-	
+
 	ut_a(trx_sys_multiple_tablespace_format);
 
 	ut_ad((pcur->latch_mode == BTR_MODIFY_PREV)
@@ -2301,7 +2312,7 @@
 	pcur */
 
 	volume = 0;
-	
+
 	rec = btr_pcur_get_rec(pcur);
 
 	page = buf_frame_align(rec);
@@ -2315,9 +2326,9 @@
 
 			break;
 		}
-		
+
 		if (page_no != ibuf_rec_get_page_no(rec)
-		    || space != ibuf_rec_get_space(rec)) {
+			|| space != ibuf_rec_get_space(rec)) {
 
 			goto count_later;
 		}
@@ -2328,7 +2339,7 @@
 	}
 
 	/* Look at the previous page */
-	
+
 	prev_page_no = btr_page_get_prev(page, mtr);
 
 	if (prev_page_no == FIL_NULL) {
@@ -2344,19 +2355,19 @@
 
 	rec = page_get_supremum_rec(prev_page);
 	rec = page_rec_get_prev(rec);
-	
+
 	for (;;) {
 		if (page_rec_is_infimum(rec)) {
 
 			/* We cannot go to yet a previous page, because we
 			do not have the x-latch on it, and cannot acquire one
 			because of the latching order: we have to give up */
-		
+
 			return(UNIV_PAGE_SIZE);
 		}
-		
+
 		if (page_no != ibuf_rec_get_page_no(rec)
-		    || space != ibuf_rec_get_space(rec)) {
+			|| space != ibuf_rec_get_space(rec)) {
 
 			goto count_later;
 		}
@@ -2365,7 +2376,7 @@
 
 		rec = page_rec_get_prev(rec);
 	}
-		
+
 count_later:
 	rec = btr_pcur_get_rec(pcur);
 
@@ -2378,9 +2389,9 @@
 
 			break;
 		}
-		
+
 		if (page_no != ibuf_rec_get_page_no(rec)
-		    || space != ibuf_rec_get_space(rec)) {
+			|| space != ibuf_rec_get_space(rec)) {
 
 			return(volume);
 		}
@@ -2391,7 +2402,7 @@
 	}
 
 	/* Look at the next page */
-	
+
 	next_page_no = btr_page_get_next(page, mtr);
 
 	if (next_page_no == FIL_NULL) {
@@ -2412,12 +2423,12 @@
 		if (page_rec_is_supremum(rec)) {
 
 			/* We give up */
-		
+
 			return(UNIV_PAGE_SIZE);
 		}
-		
+
 		if (page_no != ibuf_rec_get_page_no(rec)
-		    || space != ibuf_rec_get_space(rec)) {
+			|| space != ibuf_rec_get_space(rec)) {
 
 			return(volume);
 		}
@@ -2448,7 +2459,7 @@
 	ibuf_data = fil_space_get_ibuf_data(0);
 
 	ibuf_index = ibuf_data->index;
-	ut_a(!ibuf_index->table->comp);
+	ut_a(!dict_table_is_comp(ibuf_index->table));
 
 	ibuf_enter();
 
@@ -2468,7 +2479,7 @@
 		field = rec_get_nth_field_old(rec, 0, &len);
 
 		ut_a(len == 4);
-		
+
 		max_space_id = mach_read_from_4(field);
 	}
 
@@ -2518,7 +2529,7 @@
 	ulint		bits;
 	mtr_t		mtr;
 	mtr_t		bitmap_mtr;
-	
+
 	ut_a(!(index->type & DICT_CLUSTERED));
 	ut_ad(dtuple_check_typed(entry));
 
@@ -2543,7 +2554,7 @@
 
 #ifdef UNIV_IBUF_DEBUG
 		fputs("Ibuf too big\n", stderr);
-#endif		
+#endif
 		/* Use synchronous contract (== TRUE) */
 		ibuf_contract(TRUE);
 
@@ -2564,7 +2575,7 @@
 			mutex_exit(&ibuf_mutex);
 
 			ibuf_exit();
-			
+
 			mutex_exit(&ibuf_pessimistic_insert_mutex);
 
 			err = ibuf_add_free_page(0, ibuf_data);
@@ -2577,7 +2588,7 @@
 			mutex_enter(&ibuf_pessimistic_insert_mutex);
 
 			ibuf_enter();
-			
+
 			mutex_enter(&ibuf_mutex);
 		}
 	} else {
@@ -2588,12 +2599,12 @@
 
 	heap = mem_heap_create(512);
 
- 	/* Build the entry which contains the space id and the page number as
+	/* Build the entry which contains the space id and the page number as
 	the first fields and the type information for other fields, and which
 	will be inserted to the insert buffer. */
 
-	ibuf_entry = ibuf_entry_build(entry, index->table->comp,
-						space, page_no, heap);
+	ibuf_entry = ibuf_entry_build(entry, dict_table_is_comp(index->table),
+		space, page_no, heap);
 
 	/* Open a cursor to the insert buffer tree to calculate if we can add
 	the new entry to it without exceeding the free space limit for the
@@ -2610,7 +2621,7 @@
 #ifdef UNIV_IBUF_DEBUG
 	ut_a((buffered == 0) || ibuf_count_get(space, page_no));
 #endif
- 	mtr_start(&bitmap_mtr);
+	mtr_start(&bitmap_mtr);
 
 	bitmap_page = ibuf_bitmap_get_map_page(space, page_no, &bitmap_mtr);
 
@@ -2632,16 +2643,16 @@
 				> ibuf_index_page_calc_free_from_bits(bits)) {
 		mtr_commit(&bitmap_mtr);
 
- 		/* It may not fit */
+		/* It may not fit */
 		err = DB_STRONG_FAIL;
 
-		do_merge = TRUE; 
+		do_merge = TRUE;
 
 		ibuf_get_merge_page_nos(FALSE, btr_pcur_get_rec(&pcur),
 					space_ids, space_versions,
 					page_nos, &n_stored);
 		goto function_exit;
- 	}
+	}
 
 	/* Set the bitmap bit denoting that the insert buffer contains
 	buffered entries for this index page, if the bit is not set yet */
@@ -2654,9 +2665,9 @@
 	}
 
 	mtr_commit(&bitmap_mtr);
-						
+
 	cursor = btr_pcur_get_btr_cur(&pcur);
-	
+
 	if (mode == BTR_MODIFY_PREV) {
 		err = btr_cur_optimistic_insert(BTR_NO_LOCKING_FLAG, cursor,
 						ibuf_entry, &ins_rec,
@@ -2674,7 +2685,7 @@
 		because a pessimistic insert releases the tree x-latch,
 		which would cause the x-latching of the root after that to
 		break the latching order. */
-		
+
 		root = ibuf_tree_root_get(ibuf_data, 0, &mtr);
 
 		err = btr_cur_pessimistic_insert(BTR_NO_LOCKING_FLAG
@@ -2703,18 +2714,18 @@
 					ibuf_count_get(space, page_no) + 1);
 	}
 #endif
- 	if (mode == BTR_MODIFY_TREE) {
+	if (mode == BTR_MODIFY_TREE) {
 		ut_ad(ibuf_validate_low());
 
 		mutex_exit(&ibuf_mutex);
 		mutex_exit(&ibuf_pessimistic_insert_mutex);
 	}
-	
+
 	mtr_commit(&mtr);
- 	btr_pcur_close(&pcur);
+	btr_pcur_close(&pcur);
 	ibuf_exit();
 
- 	mem_heap_free(heap);
+	mem_heap_free(heap);
 
 	mutex_enter(&ibuf_mutex);
 
@@ -2722,13 +2733,13 @@
 		ibuf_data->empty = FALSE;
 		ibuf_data->n_inserts++;
 	}
-	
+
 	mutex_exit(&ibuf_mutex);
 
- 	if ((mode == BTR_MODIFY_TREE) && (err == DB_SUCCESS)) {
+	if ((mode == BTR_MODIFY_TREE) && (err == DB_SUCCESS)) {
 		ibuf_contract_after_insert(entry_size);
 	}
-	
+
 	if (do_merge) {
 #ifdef UNIV_IBUF_DEBUG
 		ut_a(n_stored <= IBUF_MAX_N_PAGES_MERGED);
@@ -2736,7 +2747,7 @@
 		buf_read_ibuf_merge_pages(FALSE, space_ids, space_versions,
 							page_nos, n_stored);
 	}
-	
+
 	return(err);
 }
 
@@ -2761,19 +2772,20 @@
 	ut_ad(dtuple_check_typed(entry));
 
 	ut_a(!(index->type & DICT_CLUSTERED));
-	
+
 	if (rec_get_converted_size(index, entry)
-		>= page_get_free_space_of_empty(index->table->comp) / 2) {
+		>= page_get_free_space_of_empty(
+			dict_table_is_comp(index->table)) / 2) {
 		return(FALSE);
 	}
-	
+
 	err = ibuf_insert_low(BTR_MODIFY_PREV, entry, index, space, page_no,
 									thr);
 	if (err == DB_FAIL) {
 		err = ibuf_insert_low(BTR_MODIFY_TREE, entry, index, space,
 							page_no, thr);
 	}
-	
+
 	if (err == DB_SUCCESS) {
 #ifdef UNIV_IBUF_DEBUG
 		/* fprintf(stderr, "Ibuf insert for page no %lu of index %s\n",
@@ -2787,7 +2799,7 @@
 		return(FALSE);
 	}
 }
-	
+
 /************************************************************************
 During merge, inserts to an index page a secondary index entry extracted
 from the insert buffer. */
@@ -2810,7 +2822,8 @@
 	ut_ad(ibuf_inside());
 	ut_ad(dtuple_check_typed(entry));
 
-	if (UNIV_UNLIKELY(index->table->comp != (ibool)!!page_is_comp(page))) {
+	if (UNIV_UNLIKELY(dict_table_is_comp(index->table)
+			!= (ibool)!!page_is_comp(page))) {
 		fputs(
 "InnoDB: Trying to insert a record from the insert buffer to an index page\n"
 "InnoDB: but the 'compact' flag does not match!\n", stderr);
@@ -2827,7 +2840,7 @@
 	dump:
 		buf_page_print(page);
 
-	        dtuple_print(stderr, entry);
+		dtuple_print(stderr, entry);
 
 		fputs(
 "InnoDB: The table where where this index record belongs\n"
@@ -2840,14 +2853,14 @@
 
 	low_match = page_cur_search(page, index, entry,
 						PAGE_CUR_LE, &page_cur);
-	
+
 	if (low_match == dtuple_get_n_fields(entry)) {
 		rec = page_cur_get_rec(&page_cur);
-		
+
 		btr_cur_del_unmark_for_ibuf(rec, mtr);
 	} else {
 		rec = page_cur_tuple_insert(&page_cur, entry, index, mtr);
-		
+
 		if (rec == NULL) {
 			/* If the record did not fit, reorganize */
 
@@ -2873,7 +2886,7 @@
 "\nInnoDB: The table where where this index record belongs\n"
 "InnoDB: is now probably corrupt. Please run CHECK TABLE on\n"
 "InnoDB: that table.\n", stderr);
-				
+
 				bitmap_page = ibuf_bitmap_get_map_page(
 						buf_frame_get_space_id(page),
 						buf_frame_get_page_no(page),
@@ -2887,7 +2900,7 @@
 
 				fputs(
 "InnoDB: Submit a detailed bug report to http://bugs.mysql.com\n", stderr);
-			}	
+			}
 		}
 	}
 }
@@ -2915,7 +2928,7 @@
 	ibuf_data_t*	ibuf_data;
 	page_t*		root;
 	ulint		err;
-	
+
 	ut_ad(ibuf_inside());
 
 	success = btr_cur_optimistic_delete(btr_pcur_get_btr_cur(pcur), mtr);
@@ -2930,7 +2943,7 @@
 #endif
 		return(FALSE);
 	}
-	
+
 	/* We have to resort to a pessimistic delete from ibuf */
 	btr_pcur_store_position(pcur, mtr);
 
@@ -2944,7 +2957,7 @@
 	mutex_enter(&ibuf_mutex);
 
 	mtr_start(mtr);
-	
+
 	success = btr_pcur_restore_position(BTR_MODIFY_TREE, pcur, mtr);
 
 	if (!success) {
@@ -3051,7 +3064,7 @@
 
 		return;
 	}
-#endif	
+#endif
 	if (ibuf_fixed_addr_page(page_no) || fsp_descr_page(page_no)
 					|| trx_sys_hdr_page(space, page_no)) {
 		return;
@@ -3103,12 +3116,12 @@
 
 	if (!trx_sys_multiple_tablespace_format) {
 		ut_a(trx_doublewrite_must_reset_space_ids);
-	        search_tuple = ibuf_search_tuple_build(space, page_no, heap);
+		search_tuple = ibuf_search_tuple_build(space, page_no, heap);
 	} else {
-	        search_tuple = ibuf_new_search_tuple_build(space, page_no,
+		search_tuple = ibuf_new_search_tuple_build(space, page_no,
 									heap);
 	}
-		
+
 	if (page) {
 		/* Move the ownership of the x-latch on the page to this OS
 		thread, so that we can acquire a second x-latch on it. This
@@ -3117,22 +3130,22 @@
 
 		block = buf_block_align(page);
 		rw_lock_x_lock_move_ownership(&(block->lock));
-		
+
 		if (fil_page_get_type(page) != FIL_PAGE_INDEX) {
 
 			corruption_noticed = TRUE;
-		
+
 			ut_print_timestamp(stderr);
 
 			mtr_start(&mtr);
 
 			fputs("  InnoDB: Dump of the ibuf bitmap page:\n",
 				stderr);
-			
+
 			bitmap_page = ibuf_bitmap_get_map_page(space, page_no,
 									&mtr);
 			buf_page_print(bitmap_page);
-		
+
 			mtr_commit(&mtr);
 
 			fputs("\nInnoDB: Dump of the page:\n", stderr);
@@ -3169,7 +3182,7 @@
 		buf_page_dbg_add_level(page, SYNC_TREE_NODE);
 #endif /* UNIV_SYNC_DEBUG */
 	}
-		
+
 	/* Position pcur in the insert buffer at the first entry for this
 	index page */
 	btr_pcur_open_on_user_rec(ibuf_data->index, search_tuple, PAGE_CUR_GE,
@@ -3187,7 +3200,7 @@
 
 		/* Check if the entry is for this index page */
 		if (ibuf_rec_get_page_no(ibuf_rec) != page_no
-		    || ibuf_rec_get_space(ibuf_rec) != space) {
+			|| ibuf_rec_get_space(ibuf_rec) != space) {
 			if (page) {
 				page_header_reset_last_insert(page, &mtr);
 			}
@@ -3198,7 +3211,7 @@
 			fputs("InnoDB: Discarding record\n ", stderr);
 			rec_print_old(stderr, ibuf_rec);
 			fputs("\n from the insert buffer!\n\n", stderr);
-	   	} else if (page) {
+		} else if (page) {
 			/* Now we have at pcur a record which should be
 			inserted to the index page; NOTE that the call below
 			copies pointers to fields in ibuf_rec, and we must
@@ -3208,12 +3221,12 @@
 			dulint		max_trx_id = page_get_max_trx_id(
 						buf_frame_align(ibuf_rec));
 			page_update_max_trx_id(page, max_trx_id);
-			
+
 			entry = ibuf_build_entry_from_ibuf_rec(ibuf_rec,
 							heap, &dummy_index);
 #ifdef UNIV_IBUF_DEBUG
 			volume += rec_get_converted_size(dummy_index, entry)
- 					+ page_dir_calc_reserved_space(1);
+					+ page_dir_calc_reserved_space(1);
 			ut_a(volume <= 4 * UNIV_PAGE_SIZE
 					/ IBUF_PAGE_SIZE_PER_FREE_SPACE);
 #endif
@@ -3223,7 +3236,7 @@
 		}
 
 		n_inserts++;
-		
+
 		/* Delete the record from ibuf */
 		if (ibuf_delete_rec(space, page_no, &pcur, search_tuple,
 								&mtr)) {
@@ -3235,7 +3248,7 @@
 
 		if (btr_pcur_is_after_last_on_page(&pcur, &mtr)) {
 			mtr_commit(&mtr);
- 			btr_pcur_close(&pcur);
+			btr_pcur_close(&pcur);
 
 			goto loop;
 		}
@@ -3274,13 +3287,13 @@
 					n_inserts, volume, page_no); */
 #endif
 	mtr_commit(&mtr);
- 	btr_pcur_close(&pcur);
+	btr_pcur_close(&pcur);
 	mem_heap_free(heap);
 
 	/* Protect our statistics keeping from race conditions */
 	mutex_enter(&ibuf_mutex);
 
-	ibuf_data->n_merges++;	
+	ibuf_data->n_merges++;
 	ibuf_data->n_merged_recs += n_inserts;
 
 	mutex_exit(&ibuf_mutex);
@@ -3328,7 +3341,7 @@
 	cursor positioned at the first entry for this space id */
 
 	search_tuple = ibuf_new_search_tuple_build(space, 0, heap);
-		
+
 	n_inserts = 0;
 loop:
 	ibuf_enter();
@@ -3359,7 +3372,7 @@
 		page_no = ibuf_rec_get_page_no(ibuf_rec);
 
 		n_inserts++;
-		
+
 		/* Delete the record from ibuf */
 		closed = ibuf_delete_rec(space, page_no, &pcur, search_tuple,
 									&mtr);
@@ -3374,7 +3387,7 @@
 
 		if (btr_pcur_is_after_last_on_page(&pcur, &mtr)) {
 			mtr_commit(&mtr);
- 			btr_pcur_close(&pcur);
+			btr_pcur_close(&pcur);
 
 			ibuf_exit();
 
@@ -3384,12 +3397,12 @@
 
 leave_loop:
 	mtr_commit(&mtr);
- 	btr_pcur_close(&pcur);
+	btr_pcur_close(&pcur);
 
 	/* Protect our statistics keeping from race conditions */
 	mutex_enter(&ibuf_mutex);
 
-	ibuf_data->n_merges++;	
+	ibuf_data->n_merges++;
 	ibuf_data->n_merged_recs += n_inserts;
 
 	mutex_exit(&ibuf_mutex);
@@ -3420,12 +3433,12 @@
 #endif /* UNIV_SYNC_DEBUG */
 
 	sum_sizes = 0;
-	
+
 	data = UT_LIST_GET_FIRST(ibuf->data_list);
 
 	while (data) {
 		sum_sizes += data->size;
-	
+
 		data = UT_LIST_GET_NEXT(data_list, data);
 	}
 
@@ -3468,7 +3481,7 @@
 "InnoDB: run to completion.\n");
 		}
 	} else {
-	        ut_a(data->empty == FALSE);
+		ut_a(data->empty == FALSE);
 
 		is_empty = FALSE;
 	}
@@ -3503,10 +3516,10 @@
 
 	while (data) {
 	fprintf(file,
-  	"Ibuf for space %lu: size %lu, free list len %lu, seg size %lu,",
-			       (ulong) data->space, (ulong) data->size,
-			       (ulong) data->free_list_len,
-			       (ulong) data->seg_size);
+	"Ibuf for space %lu: size %lu, free list len %lu, seg size %lu,",
+		(ulong) data->space, (ulong) data->size,
+		(ulong) data->free_list_len,
+		(ulong) data->seg_size);
 
 		if (data->empty) {
 			fputs(" is empty\n", file);
@@ -3516,21 +3529,21 @@
 		fprintf(file,
 	"Ibuf for space %lu: size %lu, free list len %lu, seg size %lu,\n"
 			"%lu inserts, %lu merged recs, %lu merges\n",
-                               (ulong) data->space,
-                               (ulong) data->size,
-                               (ulong) data->free_list_len,
-			       (ulong) data->seg_size,
-			       (ulong) data->n_inserts,
-			       (ulong) data->n_merged_recs,
-			       (ulong) data->n_merges);
+			(ulong) data->space,
+			(ulong) data->size,
+			(ulong) data->free_list_len,
+			(ulong) data->seg_size,
+			(ulong) data->n_inserts,
+			(ulong) data->n_merged_recs,
+			(ulong) data->n_merges);
 #ifdef UNIV_IBUF_DEBUG
 		for (i = 0; i < IBUF_COUNT_N_PAGES; i++) {
 			if (ibuf_count_get(data->space, i) > 0) {
 
 				fprintf(stderr,
 					"Ibuf count for page %lu is %lu\n",
-				       (ulong) i,
-				       (ulong) ibuf_count_get(data->space, i));
+					(ulong) i,
+					(ulong) ibuf_count_get(data->space, i));
 			}
 		}
 #endif

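Two patterns recur in the ibuf0ibuf.c hunks: ut_ad() checks on compile-time constants become
preprocessor errors, and the byte-by-byte loop that cleared the bitmap area of a page is replaced
with a single memset(). A self-contained sketch of both, with IBUF_BITS_PER_PAGE given an
illustrative value (the real definition lives in the insert buffer headers and is not part of
this patch, and bitmap_area_clear() is a hypothetical helper):

	#include <string.h>

	#define IBUF_BITS_PER_PAGE	4	/* illustrative value only */

	/* This check previously ran as ut_ad() in debug builds only;
	it now fails at compile time in every build. */
	#if IBUF_BITS_PER_PAGE % 2
	# error "IBUF_BITS_PER_PAGE % 2 != 0"
	#endif

	static void
	bitmap_area_clear(
	/*==============*/
		unsigned char*	area,	/* in/out: start of the bitmap area */
		unsigned long	len)	/* in: length of the area in bytes */
	{
		/* One memset() replaces the old per-byte loop. */
		memset(area, 0, len);
	}
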
--- 1.31.8.2/innobase/include/dict0dict.h	2006-04-21 01:07:32 +04:00
+++ 1.43/storage/innobase/include/dict0dict.h	2006-04-21 02:03:25 +04:00
@@ -26,6 +26,7 @@
 #include "ut0byte.h"
 #include "trx0types.h"
 
+#ifndef UNIV_HOTBACKUP
 /**********************************************************************
 Makes all characters in a NUL-terminated UTF-8 string lower case. */
 
@@ -33,6 +34,7 @@
 dict_casedn_str(
 /*============*/
 	char*	a);	/* in/out: string to put in lower case */
+#endif /* !UNIV_HOTBACKUP */
 /************************************************************************
 Get the database name length in a table name. */
 
@@ -98,15 +100,6 @@
 dict_col_get_clust_pos(
 /*===================*/
 	dict_col_t*	col);
-/********************************************************************
-If the given column name is reserved for InnoDB system columns, return
-TRUE. */
-
-ibool
-dict_col_name_is_reserved(
-/*======================*/
-				/* out: TRUE if name is reserved */
-	const char*	name);	/* in: column name */
 /************************************************************************
 Initializes the autoinc counter. It is not an error to initialize an already
 initialized counter. */
@@ -315,7 +308,7 @@
 UNIV_INLINE
 dict_table_t*
 dict_table_check_if_in_cache_low(
-/*==============================*/
+/*=============================*/
 					/* out: table, NULL if not found */
 	const char*	table_name);	/* in: table name */
 /**************************************************************************
@@ -494,6 +487,23 @@
 	dict_table_t*	table,	/* in: table */
 	ulint		sys);	/* in: DATA_ROW_ID, ... */
 /************************************************************************
+Check whether the table uses the compact page format. */
+UNIV_INLINE
+ibool
+dict_table_is_comp(
+/*===============*/
+					/* out: TRUE if table uses the
+					compact page format */
+	const dict_table_t*	table);	/* in: table */
+/************************************************************************
+Non inlined version of 'dict_table_is_comp' above. */
+ibool
+innodb_dict_table_is_comp(
+/*===============*/
+					/* out: TRUE if table uses the
+					compact page format */
+	const dict_table_t*	table);	/* in: table */
+/************************************************************************
 Checks if a column is in the ordering columns of the clustered index of a
 table. Column prefixes are treated like whole columns. */
 
@@ -669,7 +679,6 @@
 /*===============*/
 	dict_index_t*	index,		/* in: index */
 	dict_col_t*	col,		/* in: column */
-	ulint		order,		/* in: order criterion */
 	ulint		prefix_len);	/* in: column prefix length */
 /***********************************************************************
 Copies types of fields contained in index to tuple. */
@@ -689,13 +698,6 @@
 				/* out: index tree */
 	dict_index_t*	index);	/* in: index */
 /*************************************************************************
-Gets the field order criterion. */
-UNIV_INLINE
-ulint
-dict_field_get_order(
-/*=================*/
-	dict_field_t*	field);
-/*************************************************************************
 Gets the field column. */
 UNIV_INLINE
 dict_col_t*
@@ -780,7 +782,7 @@
 				pointer */
 	ulint		page_no,/* in: page number to put in node pointer */
 	mem_heap_t*	heap,	/* in: memory heap where pointer created */
-	ulint           level);  /* in: level of rec in tree: 0 means leaf
+	ulint		level);	 /* in: level of rec in tree: 0 means leaf
 				level */
 /**************************************************************************
 Copies an initial segment of a physical record, long enough to specify an
@@ -882,7 +884,7 @@
 /*=======================*/
 	dict_table_t*	table,		/* in: table */
 	ibool		has_dict_mutex);/* in: TRUE if the caller has the
-					dictionary mutex */	
+					dictionary mutex */
 /*************************************************************************
 Calculates new estimates for table and index statistics. The statistics
 are used in query optimization. */
@@ -950,13 +952,13 @@
 					header and flushed to a file; in
 					recovery this must be derived from
 					the log records */
-	hash_table_t* 	table_hash;	/* hash table of the tables, based
+	hash_table_t*	table_hash;	/* hash table of the tables, based
 					on name */
-	hash_table_t* 	table_id_hash;	/* hash table of the tables, based
+	hash_table_t*	table_id_hash;	/* hash table of the tables, based
 					on id */
-	hash_table_t* 	col_hash;	/* hash table of the columns */
+	hash_table_t*	col_hash;	/* hash table of the columns */
 	UT_LIST_BASE_NODE_T(dict_table_t)
-			table_LRU; 	/* LRU list of tables */
+			table_LRU;	/* LRU list of tables */
 	ulint		size;		/* varying space in bytes occupied
 					by the data dictionary table and
 					index objects */

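dict0dict.h only declares dict_table_is_comp() and its non-inlined wrapper
innodb_dict_table_is_comp(); the bodies live in dict0dict.ic and dict0dict.c, which are outside
this excerpt. A plausible sketch of the inline accessor, assuming it simply tests the flags
field added to dict_table_struct in the dict0mem.h hunk below:

	UNIV_INLINE
	ibool
	dict_table_is_comp(
	/*===============*/
					/* out: TRUE if table uses the
					compact page format */
		const dict_table_t*	table)	/* in: table */
	{
		ut_ad(table);

		return((table->flags & DICT_TF_COMPACT) != 0);
	}
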
--- 1.24.2.2/innobase/include/dict0mem.h	2006-04-21 01:07:32 +04:00
+++ 1.28/storage/innobase/include/dict0mem.h	2006-04-21 02:03:25 +04:00
@@ -29,12 +29,9 @@
 combination of types */
 #define DICT_CLUSTERED	1	/* clustered index */
 #define DICT_UNIQUE	2	/* unique index */
-#define	DICT_UNIVERSAL 	4	/* index which can contain records from any
+#define	DICT_UNIVERSAL	4	/* index which can contain records from any
 				other index */
-#define	DICT_IBUF 	8	/* insert buffer tree */
-				
-/* Flags for ordering an index field: OR'ing of the flags allowed */
-#define	DICT_DESCEND	1	/* in descending order (default ascending) */
+#define	DICT_IBUF	8	/* insert buffer tree */
 
 /* Types for a table object */
 #define DICT_TABLE_ORDINARY		1
@@ -42,6 +39,9 @@
 #define	DICT_TABLE_CLUSTER		3 /* this means that the table is
 					  really a cluster definition */
 
+/* Table flags */
+#define DICT_TF_COMPACT			1	/* compact page format */
+
 /**************************************************************************
 Creates a table memory object. */
 
@@ -55,14 +55,7 @@
 					is ignored if the table is made
 					a member of a cluster */
 	ulint		n_cols,		/* in: number of columns */
-	ibool		comp);		/* in: TRUE=compact page format */
-/********************************************************************
-Free a table memory object. */
-
-void
-dict_mem_table_free(
-/*================*/
-	dict_table_t*	table);		/* in: table */
+	ulint		flags);		/* in: table flags */
 /**************************************************************************
 Creates a cluster memory object. */
 
@@ -123,8 +116,6 @@
 /*=====================*/
 	dict_index_t*	index,		/* in: index */
 	const char*	name,		/* in: column name */
-	ulint		order,		/* in: order criterion; 0 means an
-					ascending order */
 	ulint		prefix_len);	/* in: 0 or the column prefix length
 					in a MySQL index like
 					INDEX (textcol(25)) */
@@ -155,7 +146,7 @@
 	const char*	name;	/* name */
 	dtype_t		type;	/* data type */
 	dict_table_t*	table;	/* back pointer to table of this column */
-	ulint		aux;	/* this is used as an auxiliary variable 
+	ulint		aux;	/* this is used as an auxiliary variable
 				in some of the functions below */
 };
 
@@ -170,8 +161,6 @@
 struct dict_field_struct{
 	dict_col_t*	col;		/* pointer to the table column */
 	const char*	name;		/* name of the column */
-	ulint		order;		/* flags for ordering this field:
-					DICT_DESCEND, ... */
 	ulint		prefix_len;	/* 0 or the length of the column
 					prefix in bytes in a MySQL index of
 					type, e.g., INDEX (textcol(25));
@@ -182,10 +171,6 @@
 	ulint		fixed_len;	/* 0 or the fixed length of the
 					column if smaller than
 					DICT_MAX_INDEX_COL_LEN */
-	ulint		fixed_offs;	/* offset to the field, or
-					ULINT_UNDEFINED if it is not fixed
-					within the record (due to preceding
-					variable-length fields) */
 };
 
 /* Data structure for an index tree */
@@ -287,7 +272,7 @@
 					constraint is defined: we allow the
 					indexes to contain more fields than
 					mentioned in the constraint, as long
-					as the first fields are as mentioned */ 
+					as the first fields are as mentioned */
 	dict_index_t*	foreign_index;	/* foreign index; we require that
 					both tables contain explicitly defined
 					indexes for the constraint: InnoDB
@@ -318,6 +303,7 @@
 struct dict_table_struct{
 	dulint		id;	/* id of the table or cluster */
 	ulint		type;	/* DICT_TABLE_ORDINARY, ... */
+	ulint		flags;	/* DICT_TF_COMPACT, ... */
 	mem_heap_t*	heap;	/* memory heap */
 	const char*	name;	/* table name */
 	const char*	dir_path_of_temp_table;/* NULL or the directory path
@@ -335,7 +321,6 @@
 	ibool		tablespace_discarded;/* this flag is set TRUE when the
 				user calls DISCARD TABLESPACE on this table,
 				and reset to FALSE in IMPORT TABLESPACE */
-	ibool		comp;	/* flag: TRUE=compact page format */
 	hash_node_t	name_hash; /* hash chain node */
 	hash_node_t	id_hash; /* hash chain node */
 	ulint		n_def;	/* number of columns defined so far */
@@ -352,7 +337,7 @@
 				which refer to this table */
 	UT_LIST_NODE_T(dict_table_t)
 			table_LRU; /* node of the LRU list of tables */
-	ulint		mem_fix;/* count of how many times the table 
+	ulint		mem_fix;/* count of how many times the table
 				and its indexes has been fixed in memory;
 				currently NOT used */
 	ulint		n_mysql_handles_opened;
@@ -415,9 +400,9 @@
 				database pages */
 	ulint		stat_sum_of_other_index_sizes;
 				/* other indexes in database pages */
-	ibool           stat_initialized; /* TRUE if statistics have
+	ibool		stat_initialized; /* TRUE if statistics have
 				been calculated the first time
-			        after database startup or table creation */
+				after database startup or table creation */
 	ulint		stat_modified_counter;
 				/* when a row is inserted, updated, or deleted,
 				we add 1 to this number; we calculate new
@@ -439,11 +424,11 @@
 				inited; MySQL gets the init value by executing
 				SELECT MAX(auto inc column) */
 	ib_longlong	autoinc;/* autoinc counter value to give to the
-				next inserted row */	
+				next inserted row */
 	ulint		magic_n;/* magic number */
 };
 #define	DICT_TABLE_MAGIC_N	76333786
-					
+
 #ifndef UNIV_NONINL
 #include "dict0mem.ic"
 #endif

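Note on the dict0mem.h hunks above: the per-table "comp" boolean and the
per-field "order"/"fixed_offs" members are dropped in favour of a single
table-level "flags" word (DICT_TF_COMPACT, ...). The log0recv.c and
row0mysql.c hunks further down consequently test the page format through an
accessor, dict_table_is_comp(), instead of reading table->comp directly.
A minimal sketch of such an accessor, assuming DICT_TF_COMPACT occupies one
bit of the new flags word; the sketch is illustrative only and the real
definition is elsewhere in the dictionary code, not in this diff:

	/* Illustrative sketch, not part of this patch: returns TRUE if the
	table uses the compact page format, by testing the DICT_TF_COMPACT
	bit of the flags word introduced above. */
	UNIV_INLINE
	ibool
	dict_table_is_comp(
	/*===============*/
				/* out: TRUE if compact page format */
		dict_table_t*	table)	/* in: table */
	{
		ut_ad(table);

		return((table->flags & DICT_TF_COMPACT) != 0);
	}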
--- 1.40.6.2/innobase/include/univ.i	2006-04-21 01:07:32 +04:00
+++ 1.50/storage/innobase/include/univ.i	2006-04-21 02:03:25 +04:00
@@ -40,7 +40,9 @@
 #undef VERSION
 
 /* Include the header file generated by GNU autoconf */
+#ifndef __WIN__
 #include "../ib_config.h"
+#endif
 
 #ifdef HAVE_SCHED_H
 #include <sched.h>
@@ -80,6 +82,10 @@
 
 /* Make a non-inline debug version */
 
+/* You can remove this define when the release is stable. This define adds
+some consistency checks to code. They use a little CPU time. */
+#define UNIV_RELEASE_NOT_YET_STABLE
+
 /*
 #define UNIV_DEBUG
 #define UNIV_MEM_DEBUG
@@ -159,7 +165,7 @@
 #define UNIV_PAGE_SIZE          (2 * 8192) /* NOTE! Currently, this has to be a
 					power of 2 */
 /* The 2-logarithm of UNIV_PAGE_SIZE: */
-#define UNIV_PAGE_SIZE_SHIFT	14					
+#define UNIV_PAGE_SIZE_SHIFT	14
 
 /* Maximum number of parallel threads in a parallelized operation */
 #define UNIV_MAX_PARALLELISM	32
@@ -228,7 +234,7 @@
 on disk, we must have also this number fit in 32 bits, also in 64-bit
 computers! */
 
-#define UNIV_SQL_NULL 	ULINT32_UNDEFINED
+#define UNIV_SQL_NULL ULINT32_UNDEFINED
 
 /* Lengths which are not UNIV_SQL_NULL, but bigger than the following
 number indicate that a field contains a reference to an externally

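Note on the univ.i hunks above: the autoconf-generated ib_config.h is now
skipped on Windows builds (__WIN__), and a temporary UNIV_RELEASE_NOT_YET_STABLE
define is introduced so that extra, slightly CPU-costly consistency checks stay
compiled in until the release is declared stable. A hedged sketch of how such a
guard is typically consumed; the page_validate() call is only an illustration
and this wiring is not part of this diff:

	/* Illustrative only: an extra consistency check that is compiled in
	while UNIV_RELEASE_NOT_YET_STABLE is defined, and disappears once the
	define is removed for a stable release. */
#ifdef UNIV_RELEASE_NOT_YET_STABLE
	ut_a(page_validate(page, index));
#endif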
--- 1.46.4.1/innobase/log/log0recv.c	2006-04-21 01:07:32 +04:00
+++ 1.52/storage/innobase/log/log0recv.c	2006-04-21 02:03:25 +04:00
@@ -90,7 +90,7 @@
 use these free frames to read in pages when we start applying the
 log records to the database. */
 
-ulint  recv_n_pool_free_frames         = 256;
+ulint	recv_n_pool_free_frames		= 256;
 
 /* The maximum lsn we see for a page during the recovery process. If this
 is bigger than the lsn we are able to scan up to, that is an indication that
@@ -149,7 +149,7 @@
 
 	recv_sys->addr_hash = hash_create(available_memory / 64);
 	recv_sys->n_addrs = 0;
-	
+
 	recv_sys->apply_log_recs = FALSE;
 	recv_sys->apply_batch_on = FALSE;
 
@@ -178,11 +178,11 @@
 		fprintf(stderr,
 "InnoDB: Error: %lu pages with log records were left unprocessed!\n"
 "InnoDB: Maximum page number with log records on it %lu\n",
-			(ulong) recv_sys->n_addrs, 
+			(ulong) recv_sys->n_addrs,
 			(ulong) recv_max_parsed_page_no);
 		ut_error;
 	}
-	
+
 	hash_table_free(recv_sys->addr_hash);
 	mem_heap_empty(recv_sys->heap);
 
@@ -197,7 +197,7 @@
 /*===============*/
 {
 	mutex_enter(&(recv_sys->mutex));
-	
+
 	hash_table_free(recv_sys->addr_hash);
 	mem_heap_free(recv_sys->heap);
 	ut_free(recv_sys->buf);
@@ -241,7 +241,7 @@
 	finish_lsn1 = ut_dulint_add(ut_dulint_align_down(archived_lsn,
 						OS_FILE_LOG_BLOCK_SIZE),
 					log_group_get_capacity(group));
-					
+
 	finish_lsn2 = ut_dulint_add(ut_dulint_align_up(recovered_lsn,
 						OS_FILE_LOG_BLOCK_SIZE),
 					recv_sys->last_log_buf_size);
@@ -255,8 +255,8 @@
 		/* It is enough to erase the length of the log buffer */
 		finish_lsn = ut_dulint_get_min(finish_lsn1, finish_lsn2);
 	}
-				
-	ut_a(RECV_SCAN_SIZE <= log_sys->buf_size);	
+
+	ut_a(RECV_SCAN_SIZE <= log_sys->buf_size);
 
 	/* Write the log buffer full of zeros */
 	for (i = 0; i < RECV_SCAN_SIZE; i++) {
@@ -266,7 +266,7 @@
 
 	start_lsn = ut_dulint_align_down(recovered_lsn,
 						OS_FILE_LOG_BLOCK_SIZE);
-	
+
 	if (ut_dulint_cmp(start_lsn, recovered_lsn) != 0) {
 		/* Copy the last incomplete log block to the log buffer and
 		edit its data length: */
@@ -276,22 +276,22 @@
 		log_block_set_data_len(log_sys->buf,
 				ut_dulint_minus(recovered_lsn, start_lsn));
 	}
-				
+
 	if (ut_dulint_cmp(start_lsn, finish_lsn) >= 0) {
 
 		return;
 	}
 
-    	for (;;) {
+	for (;;) {
 		end_lsn = ut_dulint_add(start_lsn, RECV_SCAN_SIZE);
-    	
+
 		if (ut_dulint_cmp(end_lsn, finish_lsn) > 0) {
 
 			end_lsn = finish_lsn;
 		}
 
 		len = ut_dulint_minus(end_lsn, start_lsn);
-		
+
 		log_group_write_buf(group, log_sys->buf, len, start_lsn, 0);
 		if (ut_dulint_cmp(end_lsn, finish_lsn) >= 0) {
 
@@ -330,14 +330,14 @@
 
 		return;
 	}
-					
+
 	ut_a(RECV_SCAN_SIZE <= log_sys->buf_size);
 
 	start_lsn = ut_dulint_align_down(group->scanned_lsn,
 						OS_FILE_LOG_BLOCK_SIZE);
-    	for (;;) {
+	for (;;) {
 		end_lsn = ut_dulint_add(start_lsn, RECV_SCAN_SIZE);
-    	
+
 		if (ut_dulint_cmp(end_lsn, recovered_lsn) > 0) {
 			end_lsn = ut_dulint_align_up(recovered_lsn,
 						OS_FILE_LOG_BLOCK_SIZE);
@@ -347,9 +347,9 @@
 					up_to_date_group, start_lsn, end_lsn);
 
 		len = ut_dulint_minus(end_lsn, start_lsn);
-		
+
 		log_group_write_buf(group, log_sys->buf, len, start_lsn, 0);
-		
+
 		if (ut_dulint_cmp(end_lsn, recovered_lsn) >= 0) {
 
 			return;
@@ -441,7 +441,7 @@
 	fold = ut_fold_binary(buf, LOG_CHECKPOINT_CHECKSUM_1);
 
 	if ((fold & 0xFFFFFFFFUL) != mach_read_from_4(buf
-				+ LOG_CHECKPOINT_CHECKSUM_1)) {		
+				+ LOG_CHECKPOINT_CHECKSUM_1)) {
 		return(FALSE);
 	}
 
@@ -472,32 +472,32 @@
 	dulint		checkpoint_no;
 	ulint		field;
 	byte*		buf;
-	
+
 	group = UT_LIST_GET_FIRST(log_sys->log_groups);
 
 	max_no = ut_dulint_zero;
 	*max_group = NULL;
 	*max_field = 0;
-	
+
 	buf = log_sys->checkpoint_buf;
-	
+
 	while (group) {
 		group->state = LOG_GROUP_CORRUPTED;
-	
+
 		for (field = LOG_CHECKPOINT_1; field <= LOG_CHECKPOINT_2;
 				field += LOG_CHECKPOINT_2 - LOG_CHECKPOINT_1) {
-	
+
 			log_group_read_checkpoint_info(group, field);
 
 			if (!recv_check_cp_is_consistent(buf)) {
 #ifdef UNIV_DEBUG
 				if (log_debug_writes) {
-					fprintf(stderr, 
-	    "InnoDB: Checkpoint in group %lu at %lu invalid, %lu\n",
-						(ulong) group->id,
-						(ulong) field,
-                                 (ulong) mach_read_from_4(buf
-					      + LOG_CHECKPOINT_CHECKSUM_1));
+					fprintf(stderr,
+			"InnoDB: Checkpoint in group %lu at %lu invalid, %lu\n",
+					(ulong) group->id,
+					(ulong) field,
+					(ulong) mach_read_from_4(buf
+						+ LOG_CHECKPOINT_CHECKSUM_1));
 
 				}
 #endif /* UNIV_DEBUG */
@@ -515,7 +515,7 @@
 
 #ifdef UNIV_DEBUG
 			if (log_debug_writes) {
-				fprintf(stderr, 
+				fprintf(stderr,
 			"InnoDB: Checkpoint number %lu found in group %lu\n",
 				(ulong) ut_dulint_get_low(checkpoint_no),
 				(ulong) group->id);
@@ -591,21 +591,21 @@
 	}
 
 	cp_buf = hdr + max_cp;
-	
+
 	*lsn = mach_read_from_8(cp_buf + LOG_CHECKPOINT_LSN);
 	*offset = mach_read_from_4(cp_buf + LOG_CHECKPOINT_OFFSET);
 
 	/* If the user is running a pre-3.23.50 version of InnoDB, its
 	checkpoint data does not contain the fsp limit info */
 	if (mach_read_from_4(cp_buf + LOG_CHECKPOINT_FSP_MAGIC_N)
-	    == LOG_CHECKPOINT_FSP_MAGIC_N_VAL) {
-	
+		== LOG_CHECKPOINT_FSP_MAGIC_N_VAL) {
+
 		*fsp_limit = mach_read_from_4(
 				cp_buf + LOG_CHECKPOINT_FSP_FREE_LIMIT);
 
 		if (*fsp_limit == 0) {
 			*fsp_limit = 1000000000;
-		}	
+		}
 	} else {
 		*fsp_limit = 1000000000;
 	}
@@ -678,16 +678,16 @@
 	ulint	no;
 
 	*n_bytes_scanned = 0;
-	
+
 	for (log_block = buf; log_block < buf + buf_len;
 				log_block += OS_FILE_LOG_BLOCK_SIZE) {
-	
+
 		no = log_block_get_hdr_no(log_block);
 
 /*		fprintf(stderr, "Log block header no %lu\n", no); */
 
 		if (no != log_block_convert_lsn_to_no(*scanned_lsn)
-		    || !log_block_checksum_is_ok_or_old_format(log_block)) {
+			|| !log_block_checksum_is_ok_or_old_format(log_block)) {
 /*
 			fprintf(stderr,
 "Log block n:o %lu, scanned lsn n:o %lu\n",
@@ -700,16 +700,16 @@
 			fprintf(stderr,
 "Next log block n:o %lu\n",
 			log_block_get_hdr_no(log_block));
-*/			
+*/
 			break;
 		}
 
 		if (*scanned_checkpoint_no > 0
-		    && log_block_get_checkpoint_no(log_block)
-						< *scanned_checkpoint_no
-		    && *scanned_checkpoint_no
+			&& log_block_get_checkpoint_no(log_block)
+			< *scanned_checkpoint_no
+			&& *scanned_checkpoint_no
 			- log_block_get_checkpoint_no(log_block)
-							> 0x80000000UL) {
+			> 0x80000000UL) {
 
 			/* Garbage from a log buffer flush which was made
 			before the most recent database recovery */
@@ -729,7 +729,7 @@
 		*scanned_lsn = ut_dulint_add(*scanned_lsn, data_len);
 
 		*n_bytes_scanned += data_len;
-		
+
 		if (data_len < OS_FILE_LOG_BLOCK_SIZE) {
 			/* Log data ends here */
 
@@ -769,7 +769,8 @@
 		if (NULL != (ptr = mlog_parse_index(ptr, end_ptr,
 				type == MLOG_COMP_REC_INSERT, &index))) {
 			ut_a(!page
-			  || (ibool)!!page_is_comp(page)==index->table->comp);
+				|| (ibool)!!page_is_comp(page)
+				== dict_table_is_comp(index->table));
 			ptr = page_cur_parse_insert_rec(FALSE, ptr, end_ptr,
 							index, page, mtr);
 		}
@@ -778,7 +779,8 @@
 		if (NULL != (ptr = mlog_parse_index(ptr, end_ptr,
 			type == MLOG_COMP_REC_CLUST_DELETE_MARK, &index))) {
 			ut_a(!page
-			  || (ibool)!!page_is_comp(page)==index->table->comp);
+				|| (ibool)!!page_is_comp(page)
+				== dict_table_is_comp(index->table));
 			ptr = btr_cur_parse_del_mark_set_clust_rec(ptr,
 						end_ptr, index, page);
 		}
@@ -799,7 +801,8 @@
 		if (NULL != (ptr = mlog_parse_index(ptr, end_ptr,
 			type == MLOG_COMP_REC_UPDATE_IN_PLACE, &index))) {
 			ut_a(!page
-			  || (ibool)!!page_is_comp(page)==index->table->comp);
+				|| (ibool)!!page_is_comp(page)
+				== dict_table_is_comp(index->table));
 			ptr = btr_cur_parse_update_in_place(ptr, end_ptr,
 							page, index);
 		}
@@ -810,7 +813,8 @@
 			type == MLOG_COMP_LIST_END_DELETE
 			|| type == MLOG_COMP_LIST_START_DELETE, &index))) {
 			ut_a(!page
-			  || (ibool)!!page_is_comp(page)==index->table->comp);
+				|| (ibool)!!page_is_comp(page)
+				== dict_table_is_comp(index->table));
 			ptr = page_parse_delete_rec_list(type, ptr, end_ptr,
 							index, page, mtr);
 		}
@@ -819,7 +823,8 @@
 		if (NULL != (ptr = mlog_parse_index(ptr, end_ptr,
 			type == MLOG_COMP_LIST_END_COPY_CREATED, &index))) {
 			ut_a(!page
-			  || (ibool)!!page_is_comp(page)==index->table->comp);
+				|| (ibool)!!page_is_comp(page)
+				== dict_table_is_comp(index->table));
 			ptr = page_parse_copy_rec_list_to_created_page(ptr,
 						end_ptr, index, page, mtr);
 		}
@@ -828,7 +833,8 @@
 		if (NULL != (ptr = mlog_parse_index(ptr, end_ptr,
 				type == MLOG_COMP_PAGE_REORGANIZE, &index))) {
 			ut_a(!page
-			  || (ibool)!!page_is_comp(page)==index->table->comp);
+				|| (ibool)!!page_is_comp(page)
+				== dict_table_is_comp(index->table));
 			ptr = btr_parse_page_reorganize(ptr, end_ptr, index,
 								page, mtr);
 		}
@@ -862,7 +868,8 @@
 		if (NULL != (ptr = mlog_parse_index(ptr, end_ptr,
 				type == MLOG_COMP_REC_DELETE, &index))) {
 			ut_a(!page
-			  || (ibool)!!page_is_comp(page)==index->table->comp);
+				|| (ibool)!!page_is_comp(page)
+				== dict_table_is_comp(index->table));
 			ptr = page_cur_parse_delete_rec(ptr, end_ptr,
 							index, page, mtr);
 		}
@@ -890,11 +897,11 @@
 	ut_ad(!page || ptr);
 	if (index) {
 		dict_table_t*	table = index->table;
-
-		dict_mem_index_free(index);
-		dict_mem_table_free(table);
+		mem_heap_free(index->heap);
+		mutex_free(&(table->autoinc_mutex));
+		mem_heap_free(table->heap);
 	}
-	
+
 	return(ptr);
 }
 
@@ -973,7 +980,7 @@
 	recv_data_t*	recv_data;
 	recv_data_t**	prev_field;
 	recv_addr_t*	recv_addr;
-	
+
 	if (fil_tablespace_deleted_or_being_deleted_in_mem(space, -1)) {
 		/* The tablespace does not exist any more: do not store the
 		log record */
@@ -990,7 +997,7 @@
 	recv->end_lsn = end_lsn;
 
 	recv_addr = recv_get_fil_addr_struct(space, page_no);
-	
+
 	if (recv_addr == NULL) {
 		recv_addr = mem_heap_alloc(recv_sys->heap,
 							sizeof(recv_addr_t));
@@ -1015,15 +1022,15 @@
 	/* Store the log record body in chunks of less than UNIV_PAGE_SIZE:
 	recv_sys->heap grows into the buffer pool, and bigger chunks could not
 	be allocated */
-	
+
 	while (rec_end > body) {
 
 		len = rec_end - body;
-	
+
 		if (len > RECV_DATA_BLOCK_SIZE) {
 			len = RECV_DATA_BLOCK_SIZE;
 		}
-	
+
 		recv_data = mem_heap_alloc(recv_sys->heap,
 						sizeof(recv_data_t) + len);
 		*prev_field = recv_data;
@@ -1105,17 +1112,17 @@
 	if (recv_sys->apply_log_recs == FALSE) {
 
 		/* Log records should not be applied now */
-	
+
 		mutex_exit(&(recv_sys->mutex));
 
 		return;
 	}
-	
+
 	recv_addr = recv_get_fil_addr_struct(space, page_no);
 
 	if ((recv_addr == NULL)
-	    || (recv_addr->state == RECV_BEING_PROCESSED)
-	    || (recv_addr->state == RECV_PROCESSED)) {
+		|| (recv_addr->state == RECV_BEING_PROCESSED)
+		|| (recv_addr->state == RECV_PROCESSED)) {
 
 		mutex_exit(&(recv_sys->mutex));
 
@@ -1125,13 +1132,13 @@
 	/* fprintf(stderr, "Recovering space %lu, page %lu\n", space, page_no); */
 
 	recv_addr->state = RECV_BEING_PROCESSED;
-	
+
 	mutex_exit(&(recv_sys->mutex));
 
 	mtr_start(&mtr);
 	mtr_set_log_mode(&mtr, MTR_LOG_NONE);
 
-	if (!recover_backup) {	
+	if (!recover_backup) {
 		block = buf_block_align(page);
 
 		if (just_read_in) {
@@ -1160,11 +1167,11 @@
 	if (!recover_backup) {
 		/* It may be that the page has been modified in the buffer
 		pool: read the newest modification lsn there */
-		
+
 		page_newest_lsn = buf_frame_get_newest_modification(page);
 
 		if (!ut_dulint_is_zero(page_newest_lsn)) {
-		
+
 			page_lsn = page_newest_lsn;
 		}
 	} else {
@@ -1178,10 +1185,10 @@
 	start_lsn = end_lsn = ut_dulint_zero;
 
 	recv = UT_LIST_GET_FIRST(recv_addr->rec_list);
-	
+
 	while (recv) {
 		end_lsn = recv->end_lsn;
-	
+
 		if (recv->len > RECV_DATA_BLOCK_SIZE) {
 			/* We have to copy the record body to a separate
 			buffer */
@@ -1200,19 +1207,19 @@
 				- FIL_PAGE_END_LSN_OLD_CHKSUM, ut_dulint_zero);
 			mach_write_to_8(page + FIL_PAGE_LSN, ut_dulint_zero);
 		}
-		
+
 		if (ut_dulint_cmp(recv->start_lsn, page_lsn) >= 0) {
 
 			if (!modification_to_page) {
-		
+
 				modification_to_page = TRUE;
 				start_lsn = recv->start_lsn;
 			}
 
 #ifdef UNIV_DEBUG
 			if (log_debug_writes) {
-				fprintf(stderr, 
-     "InnoDB: Applying log rec type %lu len %lu to space %lu page no %lu\n",
+				fprintf(stderr,
+	"InnoDB: Applying log rec type %lu len %lu to space %lu page no %lu\n",
 					(ulong) recv->type, (ulong) recv->len,
 					(ulong) recv_addr->space,
 					(ulong) recv_addr->page_no);
@@ -1229,7 +1236,7 @@
 					ut_dulint_add(recv->start_lsn,
 							recv->len));
 		}
-						
+
 		if (recv->len > RECV_DATA_BLOCK_SIZE) {
 			mem_free(buf);
 		}
@@ -1238,7 +1245,7 @@
 	}
 
 	mutex_enter(&(recv_sys->mutex));
-	
+
 	if (ut_dulint_cmp(recv_max_page_lsn, page_lsn) < 0) {
 		recv_max_page_lsn = page_lsn;
 	}
@@ -1249,19 +1256,19 @@
 	recv_sys->n_addrs--;
 
 	mutex_exit(&(recv_sys->mutex));
-	
+
 	if (!recover_backup && modification_to_page) {
 		ut_a(block);
 
 		buf_flush_recv_note_modification(block, start_lsn, end_lsn);
 	}
-	
+
 	/* Make sure that committing mtr does not change the modification
 	lsn values of page */
-	
+
 	mtr.modifications = FALSE;
-	
-	mtr_commit(&mtr);	
+
+	mtr_commit(&mtr);
 }
 
 /***********************************************************************
@@ -1294,12 +1301,12 @@
 
 			if (recv_addr->state == RECV_NOT_PROCESSED) {
 				recv_addr->state = RECV_BEING_READ;
-	
+
 				page_nos[n] = page_no;
 
 				n++;
 			}
-			
+
 			mutex_exit(&(recv_sys->mutex));
 		}
 	}
@@ -1310,7 +1317,7 @@
 	*/
 	return(n);
 }
-			
+
 /***********************************************************************
 Empties the hash table of stored log records, applying them to appropriate
 pages. */
@@ -1359,7 +1366,7 @@
 	recv_sys->apply_batch_on = TRUE;
 
 	for (i = 0; i < hash_get_n_cells(recv_sys->addr_hash); i++) {
-		
+
 		recv_addr = HASH_GET_FIRST(recv_sys->addr_hash, i);
 
 		while (recv_addr) {
@@ -1368,13 +1375,13 @@
 
 			if (recv_addr->state == RECV_NOT_PROCESSED) {
 				if (!has_printed) {
-	    				ut_print_timestamp(stderr);
-					fputs( 
+					ut_print_timestamp(stderr);
+					fputs(
 "  InnoDB: Starting an apply batch of log records to the database...\n"
 "InnoDB: Progress in percents: ",stderr);
 					has_printed = TRUE;
 				}
-				
+
 				mutex_exit(&(recv_sys->mutex));
 
 				if (buf_page_peek(space, page_no)) {
@@ -1402,11 +1409,11 @@
 		}
 
 		if (has_printed
-		    && (i * 100) / hash_get_n_cells(recv_sys->addr_hash)
-		    != ((i + 1) * 100)
-		             / hash_get_n_cells(recv_sys->addr_hash)) {
+			&& (i * 100) / hash_get_n_cells(recv_sys->addr_hash)
+			!= ((i + 1) * 100)
+			/ hash_get_n_cells(recv_sys->addr_hash)) {
 
-		        fprintf(stderr, "%lu ",
+			fprintf(stderr, "%lu ",
 				(ulong) ((i * 100) / hash_get_n_cells(recv_sys->addr_hash)));
 		}
 	}
@@ -1420,11 +1427,11 @@
 		os_thread_sleep(500000);
 
 		mutex_enter(&(recv_sys->mutex));
-	}	
+	}
 
 	if (has_printed) {
 
-	        fprintf(stderr, "\n");
+		fprintf(stderr, "\n");
 	}
 
 	if (!allow_ibuf) {
@@ -1437,7 +1444,7 @@
 		n_pages = buf_flush_batch(BUF_FLUSH_LIST, ULINT_MAX,
 								ut_dulint_max);
 		ut_a(n_pages != ULINT_UNDEFINED);
-		
+
 		buf_flush_wait_batch_end(BUF_FLUSH_LIST);
 
 		buf_pool_invalidate();
@@ -1450,7 +1457,7 @@
 
 	recv_sys->apply_log_recs = FALSE;
 	recv_sys->apply_batch_on = FALSE;
-			
+
 	recv_sys_empty_hash();
 
 	if (has_printed) {
@@ -1491,11 +1498,11 @@
 	fputs(
 "InnoDB: Starting an apply batch of log records to the database...\n"
 "InnoDB: Progress in percents: ", stderr);
-	
+
 	n_hash_cells = hash_get_n_cells(recv_sys->addr_hash);
 
 	for (i = 0; i < n_hash_cells; i++) {
-	        /* The address hash table is externally chained */
+		/* The address hash table is externally chained */
 		recv_addr = hash_get_nth_cell(recv_sys->addr_hash, i)->node;
 
 		while (recv_addr != NULL) {
@@ -1505,7 +1512,7 @@
 				fprintf(stderr,
 "InnoDB: Warning: cannot apply log record to tablespace %lu page %lu,\n"
 "InnoDB: because tablespace with that id does not exist.\n",
-				      recv_addr->space, recv_addr->page_no);
+recv_addr->space, recv_addr->page_no);
 */
 				recv_addr->state = RECV_PROCESSED;
 
@@ -1535,10 +1542,11 @@
 						recv_addr->space,
 						recv_addr->page_no + 1);
 			if (!success) {
-			  fprintf(stderr,
-"InnoDB: Fatal error: cannot extend tablespace %lu to hold %lu pages\n",
-				     recv_addr->space, recv_addr->page_no);
-				     
+				fprintf(stderr,
+					"InnoDB: Fatal error: cannot extend"
+					" tablespace %lu to hold %lu pages\n",
+					recv_addr->space, recv_addr->page_no);
+
 				exit(1);
 			}
 
@@ -1549,16 +1557,18 @@
 					recv_addr->page_no, 0, UNIV_PAGE_SIZE,
 					page, NULL);
 			if (error != DB_SUCCESS) {
-			  fprintf(stderr,
-"InnoDB: Fatal error: cannot read from tablespace %lu page number %lu\n",
-				     (ulong) recv_addr->space, (ulong) recv_addr->page_no);
-				     
+				fprintf(stderr,
+			"InnoDB: Fatal error: cannot read from tablespace"
+			" %lu page number %lu\n",
+					(ulong) recv_addr->space,
+					(ulong) recv_addr->page_no);
+
 				exit(1);
 			}
 
 			/* Apply the log records to this page */
 			recv_recover_page(TRUE, FALSE, page, recv_addr->space,
-						       recv_addr->page_no);
+				recv_addr->page_no);
 
 			/* Write the page back to the tablespace file using the
 			fil0fil.c routines */
@@ -1577,7 +1587,7 @@
 		if ((100 * i) / n_hash_cells
 				!= (100 * (i + 1)) / n_hash_cells) {
 			fprintf(stderr, "%lu ",
-                                (ulong) ((100 * i) / n_hash_cells));
+				(ulong) ((100 * i) / n_hash_cells));
 			fflush(stderr);
 		}
 	}
@@ -1612,7 +1622,7 @@
 #ifdef UNIV_SYNC_DEBUG
 	buf_page_dbg_add_level(replica, SYNC_NO_ORDER_CHECK);
 #endif /* UNIV_SYNC_DEBUG */
-							
+
 	ptr = recv_parse_or_apply_log_rec_body(type, body, end_ptr, replica,
 									&mtr);
 	ut_a(ptr == end_ptr);
@@ -1624,7 +1634,7 @@
 
 	/* Make sure that committing mtr does not call log routines, as
 	we currently own the log mutex */
-	
+
 	mtr.modifications = FALSE;
 
 	mtr_commit(&mtr);
@@ -1654,8 +1664,8 @@
 			ut_error;
 		}
 	}
-}	
-			
+}
+
 /***********************************************************************
 In the debug version, checks that the replica of a file page is identical
 to the original page. */
@@ -1715,7 +1725,7 @@
 	page = buf_frame_alloc();
 
 	for (page_no = 0; page_no < n_pages; page_no++) {
-	
+
 		mtr_start(&mtr);
 
 		frame = buf_page_get_gen(space1, page_no, RW_S_LATCH, NULL,
@@ -1747,7 +1757,7 @@
 			fil_io(OS_FILE_READ, TRUE, space2, page_no, 0,
 				UNIV_PAGE_SIZE, replica, NULL);
 		}
-		
+
 		recv_check_identical(page + FIL_PAGE_DATA,
 			replica + FIL_PAGE_DATA,
 			PAGE_HEADER + PAGE_MAX_TRX_ID - FIL_PAGE_DATA);
@@ -1780,7 +1790,7 @@
 	mutex_enter(&(log_sys->mutex));
 
 	recv_apply_hashed_log_recs(FALSE);
-	
+
 	mutex_exit(&(log_sys->mutex));
 
 	recv_compare_spaces(space1, space2, n_pages);
@@ -1812,7 +1822,7 @@
 	}
 
 	if (*ptr == MLOG_MULTI_REC_END) {
-	
+
 		*type = *ptr;
 
 		return(1);
@@ -1832,7 +1842,7 @@
 
 	if (UNIV_UNLIKELY(!new_ptr)) {
 
-	        return(0);
+		return(0);
 	}
 
 	/* Check that page_no is sensible */
@@ -1854,7 +1864,7 @@
 	if (*page_no > recv_max_parsed_page_no) {
 		recv_max_parsed_page_no = *page_no;
 	}
-	
+
 	return(new_ptr - ptr);
 }
 
@@ -1870,15 +1880,15 @@
 {
 	ulint	frag_len;
 	ulint	lsn_len;
-	
+
 	frag_len = (ut_dulint_get_low(lsn) % OS_FILE_LOG_BLOCK_SIZE)
-		   					- LOG_BLOCK_HDR_SIZE;
+							- LOG_BLOCK_HDR_SIZE;
 	ut_ad(frag_len < OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_HDR_SIZE
-		      					- LOG_BLOCK_TRL_SIZE);
+							- LOG_BLOCK_TRL_SIZE);
 	lsn_len = len + ((len + frag_len)
-		    	 / (OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_HDR_SIZE
-		      					- LOG_BLOCK_TRL_SIZE))
-		     	 * (LOG_BLOCK_HDR_SIZE + LOG_BLOCK_TRL_SIZE);
+			 / (OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_HDR_SIZE
+							- LOG_BLOCK_TRL_SIZE))
+			 * (LOG_BLOCK_HDR_SIZE + LOG_BLOCK_TRL_SIZE);
 
 	return(ut_dulint_add(lsn, lsn_len));
 }
@@ -1898,12 +1908,12 @@
 	ulint	space;
 	ulint	page_no;
 	byte*	body;
-	
+
 	for (i = 0; i < len; i++) {
 		ut_a(0 == recv_parse_log_rec(ptr, ptr + i, &type, &space,
 							&page_no, &body));
 	}
-}		
+}
 
 /***********************************************************
 Prints diagnostic info of corrupt log. */
@@ -1922,29 +1932,29 @@
 "InnoDB: Log parsing proceeded successfully up to %lu %lu\n"
 "InnoDB: Previous log record type %lu, is multi %lu\n"
 "InnoDB: Recv offset %lu, prev %lu\n",
-	(ulong) type, (ulong) space, (ulong) page_no,
-	(ulong) ut_dulint_get_high(recv_sys->recovered_lsn),
-        (ulong) ut_dulint_get_low(recv_sys->recovered_lsn),
-	(ulong) recv_previous_parsed_rec_type,
-	(ulong) recv_previous_parsed_rec_is_multi,
-	(ulong) (ptr - recv_sys->buf),
-	(ulong) recv_previous_parsed_rec_offset);
+		(ulong) type, (ulong) space, (ulong) page_no,
+		(ulong) ut_dulint_get_high(recv_sys->recovered_lsn),
+		(ulong) ut_dulint_get_low(recv_sys->recovered_lsn),
+		(ulong) recv_previous_parsed_rec_type,
+		(ulong) recv_previous_parsed_rec_is_multi,
+		(ulong) (ptr - recv_sys->buf),
+		(ulong) recv_previous_parsed_rec_offset);
 
 	if ((ulint)(ptr - recv_sys->buf + 100)
-					> recv_previous_parsed_rec_offset
-	    && (ulint)(ptr - recv_sys->buf + 100
-					- recv_previous_parsed_rec_offset)
-	       < 200000) {
+		> recv_previous_parsed_rec_offset
+		&& (ulint)(ptr - recv_sys->buf + 100
+			- recv_previous_parsed_rec_offset)
+		< 200000) {
 		fputs(
 "InnoDB: Hex dump of corrupt log starting 100 bytes before the start\n"
 "InnoDB: of the previous log rec,\n"
 "InnoDB: and ending 100 bytes after the start of the corrupt rec:\n",
 			stderr);
- 
+
 		ut_print_buf(stderr,
-		     recv_sys->buf + recv_previous_parsed_rec_offset - 100,
-		     ptr - recv_sys->buf + 200 -
-					recv_previous_parsed_rec_offset);
+			recv_sys->buf + recv_previous_parsed_rec_offset - 100,
+			ptr - recv_sys->buf + 200 -
+			recv_previous_parsed_rec_offset);
 		putc('\n', stderr);
 	}
 
@@ -1984,7 +1994,7 @@
 	ulint	page_no;
 	byte*	body;
 	ulint	n_recs;
-	
+
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad(mutex_own(&(log_sys->mutex)));
 #endif /* UNIV_SYNC_DEBUG */
@@ -2018,7 +2028,7 @@
 				recv_report_corrupt_log(ptr,
 						type, space, page_no);
 			}
-		
+
 			return(FALSE);
 		}
 
@@ -2032,7 +2042,7 @@
 
 			return(FALSE);
 		}
-		
+
 		recv_previous_parsed_rec_type = (ulint)type;
 		recv_previous_parsed_rec_offset = recv_sys->recovered_offset;
 		recv_previous_parsed_rec_is_multi = 0;
@@ -2042,7 +2052,7 @@
 
 #ifdef UNIV_DEBUG
 		if (log_debug_writes) {
-			fprintf(stderr, 
+			fprintf(stderr,
 "InnoDB: Parsed a single log rec type %lu len %lu space %lu page no %lu\n",
 				(ulong) type, (ulong) len, (ulong) space,
 				(ulong) page_no);
@@ -2051,10 +2061,10 @@
 
 		if (type == MLOG_DUMMY_RECORD) {
 			/* Do nothing */
-		
+
 		} else if (store_to_hash && (type == MLOG_FILE_CREATE
-					     || type == MLOG_FILE_RENAME
-					     || type == MLOG_FILE_DELETE)) {
+				|| type == MLOG_FILE_RENAME
+				|| type == MLOG_FILE_DELETE)) {
 #ifdef UNIV_HOTBACKUP
 			if (recv_replay_file_ops) {
 
@@ -2062,7 +2072,7 @@
 				operation, if possible; note that
 				fil_path_to_mysql_datadir is set in ibbackup to
 				point to the datadir we should use there */
-			
+
 				if (NULL == fil_op_log_parse_or_replay(body,
 						end_ptr, type, TRUE, space)) {
 					fprintf(stderr,
@@ -2099,19 +2109,19 @@
 
 		total_len = 0;
 		n_recs = 0;
-		
+
 		for (;;) {
 			len = recv_parse_log_rec(ptr, end_ptr, &type, &space,
 							&page_no, &body);
 			if (len == 0 || recv_sys->found_corrupt_log) {
 
-			    	if (recv_sys->found_corrupt_log) {
+				if (recv_sys->found_corrupt_log) {
 
 					recv_report_corrupt_log(ptr,
 						type, space, page_no);
-			    	}
+				}
 
-			    	return(FALSE);
+				return(FALSE);
 			}
 
 			recv_previous_parsed_rec_type = (ulint)type;
@@ -2133,7 +2143,7 @@
 
 #ifdef UNIV_DEBUG
 			if (log_debug_writes) {
-				fprintf(stderr, 
+				fprintf(stderr,
 "InnoDB: Parsed a multi log rec type %lu len %lu space %lu page no %lu\n",
 				(ulong) type, (ulong) len, (ulong) space,
 				(ulong) page_no);
@@ -2204,11 +2214,11 @@
 				recv_compare_replicate(space, page_no);
 #endif /* UNIV_LOG_REPLICATE */
 			}
-			
+
 			ptr += len;
 		}
 	}
-   
+
 	goto loop;
 }
 
@@ -2247,7 +2257,7 @@
 	} else if (ut_dulint_cmp(recv_sys->scanned_lsn, scanned_lsn) >= 0) {
 
 		return(FALSE);
-								
+
 	} else if (ut_dulint_cmp(recv_sys->parse_start_lsn,
 						recv_sys->scanned_lsn) > 0) {
 		more_len = ut_dulint_minus(scanned_lsn,
@@ -2260,7 +2270,7 @@
 
 		return(FALSE);
 	}
-	
+
 	ut_ad(data_len >= more_len);
 
 	start_offset = data_len - more_len;
@@ -2295,7 +2305,7 @@
 void
 recv_sys_justify_left_parsing_buf(void)
 /*===================================*/
-{	
+{
 	ut_memmove(recv_sys->buf, recv_sys->buf + recv_sys->recovered_offset,
 				recv_sys->len - recv_sys->recovered_offset);
 
@@ -2343,13 +2353,13 @@
 	ut_ad(len > 0);
 	ut_a(apply_automatically <= TRUE);
 	ut_a(store_to_hash <= TRUE);
-	
+
 	finished = FALSE;
-	
+
 	log_block = buf;
 	scanned_lsn = start_lsn;
 	more_data = FALSE;
-	
+
 	while (log_block < buf + len && !finished) {
 
 		no = log_block_get_hdr_no(log_block);
@@ -2360,11 +2370,11 @@
 				log_block_convert_lsn_to_no(scanned_lsn));
 */
 		if (no != log_block_convert_lsn_to_no(scanned_lsn)
-		    || !log_block_checksum_is_ok_or_old_format(log_block)) {
+			|| !log_block_checksum_is_ok_or_old_format(log_block)) {
 
 			if (no == log_block_convert_lsn_to_no(scanned_lsn)
-			    && !log_block_checksum_is_ok_or_old_format(
-								log_block)) {
+				&& !log_block_checksum_is_ok_or_old_format(
+					log_block)) {
 				fprintf(stderr,
 "InnoDB: Log block no %lu at lsn %lu %lu has\n"
 "InnoDB: ok header, but checksum field contains %lu, should be %lu\n",
@@ -2398,14 +2408,14 @@
 		data_len = log_block_get_data_len(log_block);
 
 		if ((store_to_hash || (data_len == OS_FILE_LOG_BLOCK_SIZE))
-		    && (ut_dulint_cmp(ut_dulint_add(scanned_lsn, data_len),
-						recv_sys->scanned_lsn) > 0)
-		    && (recv_sys->scanned_checkpoint_no > 0)
-		    && (log_block_get_checkpoint_no(log_block)
-		       < recv_sys->scanned_checkpoint_no)
-		    && (recv_sys->scanned_checkpoint_no
-			- log_block_get_checkpoint_no(log_block)
-			> 0x80000000UL)) {
+			&& (ut_dulint_cmp(ut_dulint_add(scanned_lsn, data_len),
+					recv_sys->scanned_lsn) > 0)
+			&& (recv_sys->scanned_checkpoint_no > 0)
+			&& (log_block_get_checkpoint_no(log_block)
+				< recv_sys->scanned_checkpoint_no)
+			&& (recv_sys->scanned_checkpoint_no
+				- log_block_get_checkpoint_no(log_block)
+				> 0x80000000UL)) {
 
 			/* Garbage from a log buffer flush which was made
 			before the most recent database recovery */
@@ -2418,8 +2428,8 @@
 			ut_error;
 #endif
 			break;
-		}		    
-		
+		}
+
 		if (ut_dulint_is_zero(recv_sys->parse_start_lsn)
 			&& (log_block_get_first_rec_group(log_block) > 0)) {
 
@@ -2457,7 +2467,7 @@
 			recv_sys->scanned_checkpoint_no =
 					log_block_get_checkpoint_no(log_block);
 		}
-						
+
 		if (data_len < OS_FILE_LOG_BLOCK_SIZE) {
 			/* Log data for this group ends here */
 
@@ -2470,12 +2480,12 @@
 	*group_scanned_lsn = scanned_lsn;
 
 	if (recv_needed_recovery
-	    || (recv_is_from_backup && !recv_is_making_a_backup)) {
+		|| (recv_is_from_backup && !recv_is_making_a_backup)) {
 		recv_scan_print_counter++;
 
 		if (finished || (recv_scan_print_counter % 80 == 0)) {
 
-			fprintf(stderr, 
+			fprintf(stderr,
 "InnoDB: Doing recovery: scanned up to log sequence number %lu %lu\n",
 				(ulong) ut_dulint_get_high(*group_scanned_lsn),
 				(ulong) ut_dulint_get_low(*group_scanned_lsn));
@@ -2488,27 +2498,27 @@
 		recv_parse_log_recs(store_to_hash);
 
 		if (store_to_hash && mem_heap_get_size(recv_sys->heap)
-						> available_memory
-		    && apply_automatically) {
-						
+			> available_memory
+			&& apply_automatically) {
+
 			/* Hash table of log records has grown too big:
 			empty it; FALSE means no ibuf operations
 			allowed, as we cannot add new records to the
 			log yet: they would be produced by ibuf
 			operations */
-		
+
 			recv_apply_hashed_log_recs(FALSE);
-		} 
+		}
 
 		if (recv_sys->recovered_offset > RECV_PARSING_BUF_SIZE / 4) {
 			/* Move parsing buffer data to the buffer start */
 
 			recv_sys_justify_left_parsing_buf();
-		}	
+		}
 	}
 
 	return(finished);
-}	
+}
 
 /***********************************************************
 Scans log from a buffer and stores new log data to the parsing buffer. Parses
@@ -2517,7 +2527,7 @@
 void
 recv_group_scan_log_recs(
 /*=====================*/
-	log_group_t* group,	/* in: log group */	
+	log_group_t* group,	/* in: log group */
 	dulint*	contiguous_lsn,	/* in/out: it is known that all log groups
 				contain contiguous log data up to this lsn */
 	dulint*	group_scanned_lsn)/* out: scanning succeeded up to this lsn */
@@ -2525,20 +2535,20 @@
 	ibool	finished;
 	dulint	start_lsn;
 	dulint	end_lsn;
-	
+
 	finished = FALSE;
 
 	start_lsn = *contiguous_lsn;
-		
-	while (!finished) {			
+
+	while (!finished) {
 		end_lsn = ut_dulint_add(start_lsn, RECV_SCAN_SIZE);
 
 		log_group_read_log_seg(LOG_RECOVER, log_sys->buf,
 						group, start_lsn, end_lsn);
 
 		finished = recv_scan_log_recs(TRUE,
-                                (buf_pool->n_frames
-                                - recv_n_pool_free_frames) * UNIV_PAGE_SIZE,
+				(buf_pool->n_frames
+				- recv_n_pool_free_frames) * UNIV_PAGE_SIZE,
 				TRUE, log_sys->buf,
 				RECV_SCAN_SIZE, start_lsn,
 				contiguous_lsn, group_scanned_lsn);
@@ -2588,7 +2598,7 @@
 
 	ut_ad((type != LOG_CHECKPOINT)
 			|| (ut_dulint_cmp(limit_lsn, ut_dulint_max) == 0));
-	
+
 	if (type == LOG_CHECKPOINT) {
 		recv_sys_create();
 		recv_sys_init(FALSE, buf_pool_get_curr_size());
@@ -2599,7 +2609,7 @@
 		"InnoDB: The user has set SRV_FORCE_NO_LOG_REDO on\n");
 		fprintf(stderr,
 		"InnoDB: Skipping log redo\n");
-		
+
 		return(DB_SUCCESS);
 	}
 
@@ -2610,7 +2620,7 @@
 	mutex_enter(&(log_sys->mutex));
 
 	/* Look for the latest checkpoint from any of the log groups */
-	
+
 	err = recv_find_max_checkpoint(&max_cp_group, &max_cp_field);
 
 	if (err != DB_SUCCESS) {
@@ -2619,7 +2629,7 @@
 
 		return(err);
 	}
-		
+
 	log_group_read_checkpoint_info(max_cp_group, max_cp_field);
 
 	buf = log_sys->checkpoint_buf;
@@ -2630,7 +2640,7 @@
 
 	/* Read the first log file header to print a note if this is
 	a recovery from a restored InnoDB Hot Backup */
-	
+
 	fil_io(OS_FILE_READ | OS_FILE_LOG, TRUE, max_cp_group->space_id,
 				0, 0, LOG_FILE_HDR_SIZE,
 				log_hdr_buf, max_cp_group);
@@ -2645,7 +2655,7 @@
 	"InnoDB: %s\n", log_hdr_buf + LOG_FILE_WAS_CREATED_BY_HOT_BACKUP);
 		fprintf(stderr,
 "InnoDB: NOTE: the following crash recovery is part of a normal restore.\n");
-		
+
 		/* Wipe over the label now */
 
 		memset(log_hdr_buf + LOG_FILE_WAS_CREATED_BY_HOT_BACKUP,
@@ -2657,7 +2667,7 @@
 				log_hdr_buf, max_cp_group);
 	}
 
-#ifdef UNIV_LOG_ARCHIVE				
+#ifdef UNIV_LOG_ARCHIVE
 	group = UT_LIST_GET_FIRST(log_sys->log_groups);
 
 	while (group) {
@@ -2684,9 +2694,9 @@
 		/* NOTE: we always do a 'recovery' at startup, but only if
 		there is something wrong we will print a message to the
 		user about recovery: */
-		
+
 		if (ut_dulint_cmp(checkpoint_lsn, max_flushed_lsn) != 0
-	    	   || ut_dulint_cmp(checkpoint_lsn, min_flushed_lsn) != 0) {
+		   || ut_dulint_cmp(checkpoint_lsn, min_flushed_lsn) != 0) {
 
 			if (ut_dulint_cmp(checkpoint_lsn, max_flushed_lsn)
 								< 0) {
@@ -2708,11 +2718,11 @@
 				(ulong) ut_dulint_get_low(max_flushed_lsn));
 			}
 
-	    	   	recv_needed_recovery = TRUE;
-	    	   
+			recv_needed_recovery = TRUE;
+
 			ut_print_timestamp(stderr);
 
-	    		fprintf(stderr,
+			fprintf(stderr,
 "  InnoDB: Database was not shut down normally!\n"
 "InnoDB: Starting crash recovery.\n");
 
@@ -2725,9 +2735,9 @@
 			check if there are half-written pages in data files,
 			and restore them from the doublewrite buffer if
 			possible */
-		
+
 			if (srv_force_recovery < SRV_FORCE_NO_LOG_REDO) {
-		
+
 				fprintf(stderr,
 "InnoDB: Restoring possible half-written data pages from the doublewrite\n"
 "InnoDB: buffer...\n");
@@ -2737,10 +2747,10 @@
 
 			ut_print_timestamp(stderr);
 
-			fprintf(stderr, 
+			fprintf(stderr,
 "  InnoDB: Starting log scan based on checkpoint at\n"
 "InnoDB: log sequence number %lu %lu.\n",
-		 			(ulong) ut_dulint_get_high(checkpoint_lsn),
+					(ulong) ut_dulint_get_high(checkpoint_lsn),
 					(ulong) ut_dulint_get_low(checkpoint_lsn));
 		} else {
 			/* Init the doublewrite buffer memory structure */
@@ -2751,7 +2761,7 @@
 	contiguous_lsn = ut_dulint_align_down(recv_sys->scanned_lsn,
 						OS_FILE_LOG_BLOCK_SIZE);
 	if (type == LOG_ARCHIVE) {
- 		/* Try to recover the remaining part from logs: first from
+		/* Try to recover the remaining part from logs: first from
 		the logs of the archived group */
 
 		group = recv_sys->archive_group;
@@ -2769,7 +2779,7 @@
 
 			return(DB_ERROR);
 		}
-		
+
 		recv_group_scan_log_recs(group, &contiguous_lsn,
 							&group_scanned_lsn);
 		if (ut_dulint_cmp(recv_sys->scanned_lsn, checkpoint_lsn) < 0) {
@@ -2795,25 +2805,25 @@
 
 	if ((type == LOG_ARCHIVE) && (group == recv_sys->archive_group)) {
 		group = UT_LIST_GET_NEXT(log_groups, group);
-	}		
+	}
 
-	while (group) {		
+	while (group) {
 		old_scanned_lsn = recv_sys->scanned_lsn;
 
 		recv_group_scan_log_recs(group, &contiguous_lsn,
 							&group_scanned_lsn);
 		group->scanned_lsn = group_scanned_lsn;
-		
+
 		if (ut_dulint_cmp(old_scanned_lsn, group_scanned_lsn) < 0) {
 			/* We found a more up-to-date group */
 
 			up_to_date_group = group;
 		}
-		
+
 		if ((type == LOG_ARCHIVE)
 				&& (group == recv_sys->archive_group)) {
 			group = UT_LIST_GET_NEXT(log_groups, group);
-		}		
+		}
 
 		group = UT_LIST_GET_NEXT(log_groups, group);
 	}
@@ -2856,7 +2866,7 @@
 
 		return(DB_ERROR);
 	}
-	
+
 	/* Synchronize the uncorrupted log groups to the most up-to-date log
 	group; we also copy checkpoint info to groups */
 
@@ -2866,7 +2876,7 @@
 #ifdef UNIV_LOG_ARCHIVE
 	log_sys->archived_lsn = archived_lsn;
 #endif /* UNIV_LOG_ARCHIVE */
-	
+
 	recv_synchronize_groups(up_to_date_group);
 
 	if (!recv_needed_recovery) {
@@ -2883,7 +2893,7 @@
 	} else {
 		srv_start_lsn = recv_sys->recovered_lsn;
 	}
-	
+
 	log_sys->lsn = recv_sys->recovered_lsn;
 
 	ut_memcpy(log_sys->buf, recv_sys->last_block, OS_FILE_LOG_BLOCK_SIZE);
@@ -2895,9 +2905,9 @@
 	log_sys->written_to_all_lsn = log_sys->lsn;
 
 	log_sys->last_checkpoint_lsn = checkpoint_lsn;
-	
+
 	log_sys->next_checkpoint_no = ut_dulint_add(checkpoint_no, 1);
-								
+
 #ifdef UNIV_LOG_ARCHIVE
 	if (ut_dulint_cmp(archived_lsn, ut_dulint_max) == 0) {
 
@@ -2906,11 +2916,11 @@
 #endif /* UNIV_LOG_ARCHIVE */
 
 	mutex_enter(&(recv_sys->mutex));
-	
+
 	recv_sys->apply_log_recs = TRUE;
 
- 	mutex_exit(&(recv_sys->mutex));
-	
+	mutex_exit(&(recv_sys->mutex));
+
 	mutex_exit(&(log_sys->mutex));
 
 	recv_lsn_checks_on = TRUE;
@@ -2929,11 +2939,11 @@
 recv_recovery_from_checkpoint_finish(void)
 /*======================================*/
 {
-	int 		i;
+	int		i;
 	os_thread_id_t	recovery_thread_id;
 
 	/* Apply the hashed log records to the respective file pages */
-	
+
 	if (srv_force_recovery < SRV_FORCE_NO_LOG_REDO) {
 
 		recv_apply_hashed_log_recs(TRUE);
@@ -3007,7 +3017,7 @@
 		group->lsn = log_sys->lsn;
 		group->lsn_offset = LOG_FILE_HDR_SIZE;
 #ifdef UNIV_LOG_ARCHIVE
-		group->archived_file_no = arch_log_no;		
+		group->archived_file_no = arch_log_no;
 		group->archived_offset = 0;
 #endif /* UNIV_LOG_ARCHIVE */
 
@@ -3015,7 +3025,7 @@
 			recv_truncate_group(group, group->lsn, group->lsn,
 						group->lsn, group->lsn);
 		}
-	
+
 		group = UT_LIST_GET_NEXT(log_groups, group);
 	}
 
@@ -3029,20 +3039,20 @@
 #ifdef UNIV_LOG_ARCHIVE
 	log_sys->archived_lsn = log_sys->lsn;
 #endif /* UNIV_LOG_ARCHIVE */
-	
+
 	log_block_init(log_sys->buf, log_sys->lsn);
 	log_block_set_first_rec_group(log_sys->buf, LOG_BLOCK_HDR_SIZE);
 
 	log_sys->buf_free = LOG_BLOCK_HDR_SIZE;
 	log_sys->lsn = ut_dulint_add(log_sys->lsn, LOG_BLOCK_HDR_SIZE);
-	
+
 	mutex_exit(&(log_sys->mutex));
 
 	/* Reset the checkpoint fields in logs */
-	
+
 	log_make_checkpoint_at(ut_dulint_max, TRUE);
 	log_make_checkpoint_at(ut_dulint_max, TRUE);
-	
+
 	mutex_enter(&(log_sys->mutex));
 }
 
@@ -3064,22 +3074,22 @@
 	byte*		buf;
 	ulint		i;
 	ulint		log_dir_len;
-	char*		name;
-	static const
-	char		logfilename[] = "ib_logfile";
+	char		name[5000];
+	static const char ib_logfile_basename[] = "ib_logfile";
 
 	log_dir_len = strlen(log_dir);
-	/* reserve space for log_dir, "ib_logfile" and a number */
-	name = memcpy(mem_alloc(log_dir_len + ((sizeof logfilename) + 11)),
-		log_dir, log_dir_len);
-	memcpy(name + log_dir_len, logfilename, sizeof logfilename);
+	/* full path name of ib_logfile consists of log dir path + basename
+	   + number. This must fit in the name buffer.
+	*/
+	ut_a(log_dir_len + strlen(ib_logfile_basename) + 11  < sizeof(name));
 
 	buf = ut_malloc(LOG_FILE_HDR_SIZE + OS_FILE_LOG_BLOCK_SIZE);
-        memset(buf, '\0', LOG_FILE_HDR_SIZE + OS_FILE_LOG_BLOCK_SIZE);
+	memset(buf, '\0', LOG_FILE_HDR_SIZE + OS_FILE_LOG_BLOCK_SIZE);
 
 	for (i = 0; i < n_log_files; i++) {
 
-		sprintf(name + log_dir_len + sizeof logfilename, "%lu", (ulong) i);
+		sprintf(name, "%s%s%lu", log_dir,
+			ib_logfile_basename, (ulong)i);
 
 		log_file = os_file_create_simple(name, OS_FILE_CREATE,
 						OS_FILE_READ_WRITE, &success);
@@ -3111,13 +3121,13 @@
 	}
 
 	/* We pretend there is a checkpoint at lsn + LOG_BLOCK_HDR_SIZE */
-	
+
 	log_reset_first_header_and_checkpoint(buf, lsn);
-	
+
 	log_block_init_in_old_format(buf + LOG_FILE_HDR_SIZE, lsn);
 	log_block_set_first_rec_group(buf + LOG_FILE_HDR_SIZE,
 							LOG_BLOCK_HDR_SIZE);
-	strcpy(name + log_dir_len + sizeof logfilename, "0");
+	sprintf(name, "%s%s%lu", log_dir, ib_logfile_basename, (ulong)0);
 
 	log_file = os_file_create_simple(name, OS_FILE_OPEN,
 						OS_FILE_READ_WRITE, &success);
@@ -3132,7 +3142,6 @@
 	os_file_flush(log_file);
 	os_file_close(log_file);
 
-	mem_free(name);
 	ut_free(buf);
 }
 #endif /* UNIV_HOTBACKUP */
@@ -3164,11 +3173,11 @@
 
 	ut_a(0);
 
-try_open_again:	
+try_open_again:
 	buf = log_sys->buf;
 
 	/* Add the file to the archive file space; open the file */
-	
+
 	log_archived_file_name_gen(name, group->id, group->archived_file_no);
 
 	file_handle = os_file_create(name, OS_FILE_OPEN,
@@ -3176,12 +3185,12 @@
 
 	if (ret == FALSE) {
 ask_again:
-		fprintf(stderr, 
+		fprintf(stderr,
 	"InnoDB: Do you want to copy additional archived log files\n"
 	"InnoDB: to the directory\n");
-		fprintf(stderr, 
+		fprintf(stderr,
 	"InnoDB: or were these all the files needed in recovery?\n");
-		fprintf(stderr, 
+		fprintf(stderr,
 	"InnoDB: (Y == copy more files; N == this is all)?");
 
 		input_char = getchar();
@@ -3201,25 +3210,27 @@
 	ut_a(ret);
 
 	ut_a(file_size_high == 0);
-	
+
 	fprintf(stderr, "InnoDB: Opened archived log file %s\n", name);
-			
+
 	ret = os_file_close(file_handle);
-	
+
 	if (file_size < LOG_FILE_HDR_SIZE) {
 		fprintf(stderr,
 			"InnoDB: Archive file header incomplete %s\n", name);
-	    
+
 		return(TRUE);
 	}
 
 	ut_a(ret);
-	
+
 	/* Add the archive file as a node to the space */
-		
+
 	fil_node_create(name, 1 + file_size / UNIV_PAGE_SIZE,
-					    group->archive_space_id, FALSE);
-	ut_a(RECV_SCAN_SIZE >= LOG_FILE_HDR_SIZE);
+		group->archive_space_id, FALSE);
+#if RECV_SCAN_SIZE < LOG_FILE_HDR_SIZE
+# error "RECV_SCAN_SIZE < LOG_FILE_HDR_SIZE"
+#endif
 
 	/* Read the archive file header */
 	fil_io(OS_FILE_READ | OS_FILE_LOG, TRUE, group->archive_space_id, 0, 0,
@@ -3228,11 +3239,11 @@
 	/* Check if the archive file header is consistent */
 
 	if (mach_read_from_4(buf + LOG_GROUP_ID) != group->id
-	    || mach_read_from_4(buf + LOG_FILE_NO)
-						!= group->archived_file_no) {
+		|| mach_read_from_4(buf + LOG_FILE_NO)
+		!= group->archived_file_no) {
 		fprintf(stderr,
 	"InnoDB: Archive file header inconsistent %s\n", name);
-	    
+
 		return(TRUE);
 	}
 
@@ -3242,22 +3253,22 @@
 
 		return(TRUE);
 	}
-	
+
 	start_lsn = mach_read_from_8(buf + LOG_FILE_START_LSN);
 	file_end_lsn = mach_read_from_8(buf + LOG_FILE_END_LSN);
 
 	if (ut_dulint_is_zero(recv_sys->scanned_lsn)) {
 
 		if (ut_dulint_cmp(recv_sys->parse_start_lsn, start_lsn) < 0) {
-			fprintf(stderr, 
+			fprintf(stderr,
 	"InnoDB: Archive log file %s starts from too big a lsn\n",
-								name);	    
+				name);
 			return(TRUE);
 		}
-	
+
 		recv_sys->scanned_lsn = start_lsn;
 	}
-	
+
 	if (ut_dulint_cmp(recv_sys->scanned_lsn, start_lsn) != 0) {
 
 		fprintf(stderr,
@@ -3267,7 +3278,7 @@
 	}
 
 	read_offset = LOG_FILE_HDR_SIZE;
-	
+
 	for (;;) {
 		len = RECV_SCAN_SIZE;
 
@@ -3280,10 +3291,10 @@
 
 			break;
 		}
-	
+
 #ifdef UNIV_DEBUG
 		if (log_debug_writes) {
-			fprintf(stderr, 
+			fprintf(stderr,
 "InnoDB: Archive read starting at lsn %lu %lu, len %lu from file %s\n",
 					(ulong) ut_dulint_get_high(start_lsn),
 					(ulong) ut_dulint_get_low(start_lsn),
@@ -3296,8 +3307,8 @@
 			read_offset % UNIV_PAGE_SIZE, len, buf, NULL);
 
 		ret = recv_scan_log_recs(TRUE,
-                                (buf_pool->n_frames -
-                                recv_n_pool_free_frames) * UNIV_PAGE_SIZE,
+				(buf_pool->n_frames -
+				recv_n_pool_free_frames) * UNIV_PAGE_SIZE,
 				TRUE, buf, len, start_lsn,
 				&dummy_lsn, &scanned_lsn);
 
@@ -3309,10 +3320,10 @@
 		if (ret) {
 			fprintf(stderr,
 		"InnoDB: Archive log file %s does not scan right\n",
-								name);	    
+				name);
 			return(TRUE);
 		}
-		
+
 		read_offset += len;
 		start_lsn = ut_dulint_add(start_lsn, len);
 
@@ -3342,12 +3353,12 @@
 	ulint		trunc_len;
 	ibool		ret;
 	ulint		err;
-	
+
 	ut_a(0);
 
 	recv_sys_create();
 	recv_sys_init(FALSE, buf_pool_get_curr_size());
-	
+
 	recv_recovery_on = TRUE;
 	recv_recovery_from_backup_on = TRUE;
 
@@ -3360,9 +3371,9 @@
 	while (group) {
 		if (group->id == group_id) {
 
- 			break;
+			break;
 		}
-		
+
 		group = UT_LIST_GET_NEXT(log_groups, group);
 	}
 
@@ -3384,7 +3395,7 @@
 	recv_sys->archive_group = group;
 
 	ret = FALSE;
-	
+
 	mutex_enter(&(log_sys->mutex));
 
 	while (!ret) {
@@ -3392,9 +3403,9 @@
 
 		/* Close and truncate a possible processed archive file
 		from the file space */
-		
+
 		trunc_len = UNIV_PAGE_SIZE
-			    * fil_space_get_size(group->archive_space_id);
+			* fil_space_get_size(group->archive_space_id);
 		if (trunc_len > 0) {
 			fil_space_truncate_start(group->archive_space_id,
 								trunc_len);

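Two recurring patterns in the log0recv.c hunks above are worth noting. First,
the runtime assertion ut_a(RECV_SCAN_SIZE >= LOG_FILE_HDR_SIZE) becomes a
preprocessor check (#if ... #error), so an impossible configuration is rejected
at compile time rather than during recovery. Second, every MLOG_COMP_* parsing
branch now asserts that the compact bit stored in the page agrees with
dict_table_is_comp() of the dummy index's table. As a purely hypothetical
factoring, not part of this changeset, that repeated guard could be written
once as a helper:

	/* Hypothetical helper, not part of this patch: checks that the
	compact-format bit of the page matches the dictionary flag of the
	index's table before a parsed log record is applied to the page. */
	static void
	recv_assert_page_format_matches(
	/*============================*/
		page_t*		page,	/* in: page to apply to, or NULL */
		dict_index_t*	index)	/* in: dummy index built by
					mlog_parse_index() */
	{
		ut_a(!page
			|| (ibool)!!page_is_comp(page)
				== dict_table_is_comp(index->table));
	}

Each MLOG_COMP_* branch would then call this helper before invoking the
corresponding *_parse_* routine, instead of repeating the expression inline.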
--- 1.103.13.2/innobase/row/row0mysql.c	2006-04-21 01:07:33 +04:00
+++ 1.123/storage/innobase/row/row0mysql.c	2006-04-21 02:03:25 +04:00
@@ -45,7 +45,7 @@
 };
 
 UT_LIST_BASE_NODE_T(row_mysql_drop_t)	row_mysql_drop_list;
-ibool	row_mysql_drop_list_inited 	= FALSE;
+ibool	row_mysql_drop_list_inited	= FALSE;
 
 /* Magic table names for invoking various monitor threads */
 static const char S_innodb_monitor[] = "innodb_monitor";
@@ -89,8 +89,8 @@
 		return(FALSE);
 	}
 	return(0 == strcmp(name + 6, "host")
-	    || 0 == strcmp(name + 6, "user")
-	    || 0 == strcmp(name + 6, "db"));
+		|| 0 == strcmp(name + 6, "user")
+		|| 0 == strcmp(name + 6, "db"));
 }
 
 /***********************************************************************
@@ -207,7 +207,7 @@
 
 	mach_write_to_n_little_endian(dest, col_len - 8, len);
 
-	ut_memcpy(dest + col_len - 8, (byte*)&data, sizeof(byte*));	
+	ut_memcpy(dest + col_len - 8, &data, sizeof(byte*));
 }
 
 /***********************************************************************
@@ -226,7 +226,7 @@
 
 	*len = mach_read_from_n_little_endian(ref, col_len - 8);
 
-	ut_memcpy((byte*)&data, ref + col_len - 8, sizeof(byte*));
+	ut_memcpy(&data, ref + col_len - 8, sizeof(byte*));
 
 	return(data);
 }
@@ -267,7 +267,7 @@
 					VARCHAR then this is irrelevant */
 	ulint		comp)		/* in: nonzero=compact format */
 {
-	byte*		ptr 	= mysql_data;
+	byte*		ptr	= mysql_data;
 	dtype_t*	dtype;
 	ulint		type;
 	ulint		lenlen;
@@ -299,13 +299,13 @@
 
 		buf += col_len;
 	} else if ((type == DATA_VARCHAR
-		    || type == DATA_VARMYSQL
-		    || type == DATA_BINARY)) {
+			|| type == DATA_VARMYSQL
+			|| type == DATA_BINARY)) {
 
 		if (dtype_get_mysql_type(dtype) == DATA_MYSQL_TRUE_VARCHAR) {
 			/* The length of the actual data is stored to 1 or 2
 			bytes at the start of the field */
-			
+
 			if (row_format_col) {
 				if (dtype->prtype & DATA_LONG_TRUE_VARCHAR) {
 					lenlen = 2;
@@ -318,7 +318,7 @@
 			}
 
 			ptr = row_mysql_read_true_varchar(&col_len, mysql_data,
-								      lenlen);
+				lenlen);
 		} else {
 			/* Remove trailing spaces from old style VARCHAR
 			columns. */
@@ -361,12 +361,12 @@
 		Consider a CHAR(n) field, a field of n characters.
 		It will contain between n * mbminlen and n * mbmaxlen bytes.
 		We will try to truncate it to n bytes by stripping
-		space padding.  If the field contains single-byte
+		space padding.	If the field contains single-byte
 		characters only, it will be truncated to n characters.
 		Consider a CHAR(5) field containing the string ".a   "
 		where "." denotes a 3-byte character represented by
 		the bytes "$%&".  After our stripping, the string will
-		be stored as "$%&a " (5 bytes).  The string ".abc "
+		be stored as "$%&a " (5 bytes).	 The string ".abc "
 		will be stored as "$%&abc" (6 bytes).
 
 		The space padding will be restored in row0sel.c, function
@@ -410,10 +410,10 @@
 					row is used, as row may contain
 					pointers to this record! */
 {
-	mysql_row_templ_t*	templ;	
+	mysql_row_templ_t*	templ;
 	dfield_t*		dfield;
 	ulint			i;
-	
+
 	ut_ad(prebuilt->template_type == ROW_MYSQL_WHOLE_ROW);
 	ut_ad(prebuilt->mysql_template);
 
@@ -426,7 +426,7 @@
 			/* Column may be SQL NULL */
 
 			if (mysql_rec[templ->mysql_null_byte_offset] &
- 					(byte) (templ->mysql_null_bit_mask)) {
+					(byte) (templ->mysql_null_bit_mask)) {
 
 				/* It is SQL NULL */
 
@@ -434,18 +434,18 @@
 
 				goto next_column;
 			}
-		}			
-		
+		}
+
 		row_mysql_store_col_in_innobase_format(dfield,
-					prebuilt->ins_upd_rec_buff
-						+ templ->mysql_col_offset,
-					TRUE, /* MySQL row format data */
-					mysql_rec + templ->mysql_col_offset,
-					templ->mysql_col_len,
-					prebuilt->table->comp);
+			prebuilt->ins_upd_rec_buff
+			+ templ->mysql_col_offset,
+			TRUE, /* MySQL row format data */
+			mysql_rec + templ->mysql_col_offset,
+			templ->mysql_col_len,
+			dict_table_is_comp(prebuilt->table));
 next_column:
 		;
-	} 
+	}
 }
 
 /********************************************************************
@@ -469,20 +469,21 @@
 
 handle_new_error:
 	err = trx->error_state;
-	
+
 	ut_a(err != DB_SUCCESS);
-	
+
 	trx->error_state = DB_SUCCESS;
 
-	if (err == DB_DUPLICATE_KEY) {
-           	if (savept) {
+	if ((err == DB_DUPLICATE_KEY)
+		|| (err == DB_FOREIGN_DUPLICATE_KEY)) {
+		if (savept) {
 			/* Roll back the latest, possibly incomplete
 			insertion or update */
 
 			trx_general_rollback_for_mysql(trx, TRUE, savept);
 		}
 	} else if (err == DB_TOO_BIG_RECORD) {
-           	if (savept) {
+		if (savept) {
 			/* Roll back the latest, possibly incomplete
 			insertion or update */
 
@@ -492,7 +493,7 @@
 	} else if (err == DB_ROW_IS_REFERENCED
 		   || err == DB_NO_REFERENCED_ROW
 		   || err == DB_CANNOT_ADD_CONSTRAINT) {
-           	if (savept) {
+		if (savept) {
 			/* Roll back the latest, possibly incomplete
 			insertion or update */
 
@@ -519,10 +520,10 @@
 		to version 3.23.43 */
 
 		trx_general_rollback_for_mysql(trx, FALSE, NULL);
-				
+
 	} else if (err == DB_OUT_OF_FILE_SPACE
 		   || err == DB_LOCK_WAIT_TIMEOUT) {
-           	if (savept) {
+		if (savept) {
 			/* Roll back the latest, possibly incomplete
 			insertion or update */
 
@@ -536,32 +537,32 @@
 		"InnoDB: The database cannot continue operation because of\n"
 		"InnoDB: lack of space. You must add a new data file to\n"
 		"InnoDB: my.cnf and restart the database.\n", stderr);
-		
+
 		exit(1);
 	} else if (err == DB_CORRUPTION) {
 
-	       fputs(
-	    "InnoDB: We detected index corruption in an InnoDB type table.\n"
-	    "InnoDB: You have to dump + drop + reimport the table or, in\n"
-	    "InnoDB: a case of widespread corruption, dump all InnoDB\n"
-	    "InnoDB: tables and recreate the whole InnoDB tablespace.\n"
-	    "InnoDB: If the mysqld server crashes after the startup or when\n"
-	    "InnoDB: you dump the tables, look at\n"
-	    "InnoDB: http://dev.mysql.com/doc/mysql/en/Forcing_recovery.html"
-	    " for help.\n", stderr);
+		fputs(
+	"InnoDB: We detected index corruption in an InnoDB type table.\n"
+	"InnoDB: You have to dump + drop + reimport the table or, in\n"
+	"InnoDB: a case of widespread corruption, dump all InnoDB\n"
+	"InnoDB: tables and recreate the whole InnoDB tablespace.\n"
+	"InnoDB: If the mysqld server crashes after the startup or when\n"
+	"InnoDB: you dump the tables, look at\n"
+	"InnoDB: http://dev.mysql.com/doc/mysql/en/Forcing_recovery.html"
+	" for help.\n", stderr);
 
 	} else {
 		fprintf(stderr, "InnoDB: unknown error code %lu\n",
 			(ulong) err);
 		ut_error;
-	}		
+	}
 
 	if (trx->error_state != DB_SUCCESS) {
 		*new_err = trx->error_state;
 	} else {
 		*new_err = err;
 	}
-	
+
 	trx->error_state = DB_SUCCESS;
 
 	return(FALSE);
@@ -570,6 +571,7 @@
 	InnoDB Hot Backup builds.  Besides, this function should never
 	be called in InnoDB Hot Backup. */
 	ut_error;
+	return(FALSE);
 #endif /* UNIV_HOTBACKUP */
 }
 
@@ -588,7 +590,7 @@
 	dtuple_t*	ref;
 	ulint		ref_len;
 	ulint		i;
-	
+
 	heap = mem_heap_create(128);
 
 	prebuilt = mem_heap_alloc(heap, sizeof(row_prebuilt_t));
@@ -615,22 +617,24 @@
 	prebuilt->ins_node = NULL;
 
 	prebuilt->ins_upd_rec_buff = NULL;
-	
+
 	prebuilt->upd_node = NULL;
 	prebuilt->ins_graph = NULL;
 	prebuilt->upd_graph = NULL;
 
-  	prebuilt->pcur = btr_pcur_create_for_mysql();
-  	prebuilt->clust_pcur = btr_pcur_create_for_mysql();
+	prebuilt->pcur = btr_pcur_create_for_mysql();
+	prebuilt->clust_pcur = btr_pcur_create_for_mysql();
 
 	prebuilt->select_lock_type = LOCK_NONE;
 	prebuilt->stored_select_lock_type = 99999999;
 
+	prebuilt->row_read_type = ROW_READ_WITH_LOCKS;
+
 	prebuilt->sel_graph = NULL;
 
 	prebuilt->search_tuple = dtuple_create(heap,
 					2 * dict_table_get_n_cols(table));
-	
+
 	clust_index = dict_table_get_first_index(table);
 
 	/* Make sure that search_tuple is long enough for clustered index */
@@ -668,7 +672,7 @@
 	ulint	i;
 
 	if (prebuilt->magic_n != ROW_PREBUILT_ALLOCATED
-	    || prebuilt->magic_n2 != ROW_PREBUILT_ALLOCATED) {
+		|| prebuilt->magic_n2 != ROW_PREBUILT_ALLOCATED) {
 		fprintf(stderr,
 "InnoDB: Error: trying to free a corrupt\n"
 "InnoDB: table handle. Magic n %lu, magic n2 %lu, table name",
@@ -677,7 +681,7 @@
 		ut_print_name(stderr, NULL, prebuilt->table->name);
 		putc('\n', stderr);
 
-		mem_analyze_corruption((byte*)prebuilt);
+		mem_analyze_corruption(prebuilt);
 
 		ut_error;
 	}
@@ -699,11 +703,11 @@
 	if (prebuilt->sel_graph) {
 		que_graph_free_recursive(prebuilt->sel_graph);
 	}
-	
+
 	if (prebuilt->upd_graph) {
 		que_graph_free_recursive(prebuilt->upd_graph);
 	}
-	
+
 	if (prebuilt->blob_heap) {
 		mem_heap_free(prebuilt->blob_heap);
 	}
@@ -711,15 +715,15 @@
 	if (prebuilt->old_vers_heap) {
 		mem_heap_free(prebuilt->old_vers_heap);
 	}
-	
+
 	for (i = 0; i < MYSQL_FETCH_CACHE_SIZE; i++) {
 		if (prebuilt->fetch_cache[i] != NULL) {
 
 			if ((ROW_PREBUILT_FETCH_MAGIC_N !=
-			    mach_read_from_4((prebuilt->fetch_cache[i]) - 4))
-			    || (ROW_PREBUILT_FETCH_MAGIC_N !=
-			    mach_read_from_4((prebuilt->fetch_cache[i])
-			    			+ prebuilt->mysql_row_len))) {
+					mach_read_from_4((prebuilt->fetch_cache[i]) - 4))
+				|| (ROW_PREBUILT_FETCH_MAGIC_N !=
+					mach_read_from_4((prebuilt->fetch_cache[i])
+						+ prebuilt->mysql_row_len))) {
 				fputs(
 			"InnoDB: Error: trying to free a corrupt\n"
 			"InnoDB: fetch buffer.\n", stderr);
@@ -750,14 +754,14 @@
 	row_prebuilt_t*	prebuilt,	/* in: prebuilt struct in MySQL
 					handle */
 	trx_t*		trx)		/* in: transaction handle */
-{	
+{
 	if (trx->magic_n != TRX_MAGIC_N) {
 		fprintf(stderr,
 		"InnoDB: Error: trying to use a corrupt\n"
 		"InnoDB: trx handle. Magic n %lu\n",
 		(ulong) trx->magic_n);
 
-		mem_analyze_corruption((byte*)trx);
+		mem_analyze_corruption(trx);
 
 		ut_error;
 	}
@@ -770,7 +774,7 @@
 		ut_print_name(stderr, NULL, prebuilt->table->name);
 		putc('\n', stderr);
 
-		mem_analyze_corruption((byte*)prebuilt);
+		mem_analyze_corruption(prebuilt);
 
 		ut_error;
 	}
@@ -787,7 +791,7 @@
 
 	if (prebuilt->sel_graph) {
 		prebuilt->sel_graph->trx = trx;
-	}	
+	}
 }
 
 /*************************************************************************
@@ -799,7 +803,7 @@
 row_get_prebuilt_insert_row(
 /*========================*/
 					/* out: prebuilt dtuple; the column
-					type information is also set in it */ 
+					type information is also set in it */
 	row_prebuilt_t*	prebuilt)	/* in: prebuilt struct in MySQL
 					handle */
 {
@@ -809,14 +813,14 @@
 	ulint		i;
 
 	ut_ad(prebuilt && table && prebuilt->trx);
-	
+
 	if (prebuilt->ins_node == NULL) {
 
 		/* Not called before for this handle: create an insert node
 		and query graph to the prebuilt struct */
 
 		node = ins_node_create(INS_DIRECT, table, prebuilt->heap);
-		
+
 		prebuilt->ins_node = node;
 
 		if (prebuilt->ins_upd_rec_buff == NULL) {
@@ -824,7 +828,7 @@
 						prebuilt->heap,
 						prebuilt->mysql_row_len);
 		}
-		
+
 		row = dtuple_create(prebuilt->heap,
 					dict_table_get_n_cols(table));
 
@@ -834,8 +838,8 @@
 		a debug assertion from failing */
 
 		for (i = 0; i < dtuple_get_n_fields(row); i++) {
-		    
-		        dtuple_get_nth_field(row, i)->len = UNIV_SQL_NULL;
+
+			dtuple_get_nth_field(row, i)->len = UNIV_SQL_NULL;
 		}
 
 		ins_node_set_new_row(node, row);
@@ -848,7 +852,7 @@
 		prebuilt->ins_graph->state = QUE_FORK_ACTIVE;
 	}
 
-	return(prebuilt->ins_node->row);	
+	return(prebuilt->ins_node->row);
 }
 
 /*************************************************************************
@@ -861,7 +865,7 @@
 	dict_table_t*	table)	/* in: table */
 {
 	ulint	counter;
-	
+
 	counter = table->stat_modified_counter;
 
 	table->stat_modified_counter = counter + 1;
@@ -873,16 +877,16 @@
 	a counter table which is very small and updated very often. */
 
 	if (counter > 2000000000
-	    || ((ib_longlong)counter > 16 + table->stat_n_rows / 16)) {
+		|| ((ib_longlong)counter > 16 + table->stat_n_rows / 16)) {
 
 		dict_update_statistics(table);
-	}	
+	}
 }
-		  	
+
 /*************************************************************************
 Unlocks an AUTO_INC type lock possibly reserved by trx. */
 
-void		  	
+void
 row_unlock_table_autoinc_for_mysql(
 /*===============================*/
 	trx_t*	trx)	/* in: transaction */
@@ -909,15 +913,15 @@
 	row_prebuilt_t*	prebuilt)	/* in: prebuilt struct in the MySQL
 					table handle */
 {
-	trx_t*		trx 		= prebuilt->trx;
+	trx_t*		trx		= prebuilt->trx;
 	ins_node_t*	node		= prebuilt->ins_node;
 	que_thr_t*	thr;
 	ulint		err;
 	ibool		was_lock_wait;
-	
+
 	ut_ad(trx);
 	ut_ad(trx->mysql_thread_id == os_thread_get_curr_id());
-	
+
 	if (trx->auto_inc_lock) {
 
 		return(DB_SUCCESS);
@@ -987,11 +991,11 @@
 	ulint		mode)		/* in: lock mode of table
 					(ignored if table==NULL) */
 {
-	trx_t*		trx 		= prebuilt->trx;
+	trx_t*		trx		= prebuilt->trx;
 	que_thr_t*	thr;
 	ulint		err;
 	ibool		was_lock_wait;
-	
+
 	ut_ad(trx);
 	ut_ad(trx->mysql_thread_id == os_thread_get_curr_id());
 
@@ -1042,12 +1046,12 @@
 	}
 
 	que_thr_stop_for_mysql_no_error(thr, trx);
-		
+
 	trx->op_info = "";
 
-	return((int) err);	
+	return((int) err);
 }
-					
+
 /*************************************************************************
 Does an insert for MySQL. */
 
@@ -1063,15 +1067,15 @@
 	que_thr_t*	thr;
 	ulint		err;
 	ibool		was_lock_wait;
-	trx_t*		trx 		= prebuilt->trx;
+	trx_t*		trx		= prebuilt->trx;
 	ins_node_t*	node		= prebuilt->ins_node;
-	
+
 	ut_ad(trx);
 	ut_ad(trx->mysql_thread_id == os_thread_get_curr_id());
 
 	if (prebuilt->table->ibd_file_missing) {
-	        ut_print_timestamp(stderr);
-	        fprintf(stderr, "  InnoDB: Error:\n"
+		ut_print_timestamp(stderr);
+		fprintf(stderr, "  InnoDB: Error:\n"
 "InnoDB: MySQL is trying to use a table handle but the .ibd file for\n"
 "InnoDB: table %s does not exist.\n"
 "InnoDB: Have you deleted the .ibd file from the database directory under\n"
@@ -1091,7 +1095,7 @@
 		ut_print_name(stderr, prebuilt->trx, prebuilt->table->name);
 		putc('\n', stderr);
 
-		mem_analyze_corruption((byte*)prebuilt);
+		mem_analyze_corruption(prebuilt);
 
 		ut_error;
 	}
@@ -1120,9 +1124,9 @@
 	}
 
 	row_mysql_convert_row_to_innobase(node->row, prebuilt, mysql_rec);
-	
+
 	savept = trx_savept_take(trx);
-	
+
 	thr = que_fork_get_first_thr(prebuilt->ins_graph);
 
 	if (prebuilt->sql_stat_start) {
@@ -1131,7 +1135,7 @@
 	} else {
 		node->state = INS_NODE_ALLOC_ROW_ID;
 	}
-	
+
 	que_thr_move_to_run_state_for_mysql(thr, trx);
 
 run_again:
@@ -1139,7 +1143,7 @@
 	thr->prev_node = node;
 
 	row_ins_step(thr);
-	
+
 	err = trx->error_state;
 
 	if (err != DB_SUCCESS) {
@@ -1161,15 +1165,15 @@
 	}
 
 	que_thr_stop_for_mysql_no_error(thr, trx);
-	
+
 	prebuilt->table->stat_n_rows++;
 
 	srv_n_rows_inserted++;
-	
+
 	if (prebuilt->table->stat_n_rows == 0) {
 		/* Avoid wrap-over */
 		prebuilt->table->stat_n_rows--;
-	}	
+	}
 
 	row_update_statistics_if_needed(prebuilt->table);
 	trx->op_info = "";
@@ -1189,11 +1193,11 @@
 	sel_node_t*	node;
 
 	ut_ad(prebuilt && prebuilt->trx);
-	
+
 	if (prebuilt->sel_graph == NULL) {
 
 		node = sel_node_create(prebuilt->heap);
-				
+
 		prebuilt->sel_graph =
 			que_node_get_parent(
 				pars_complete_graph_for_exec(node,
@@ -1218,7 +1222,7 @@
 	upd_node_t*	node;
 
 	node = upd_node_create(heap);
-		
+
 	node->in_mysql_interface = TRUE;
 	node->is_delete = FALSE;
 	node->searched_update = FALSE;
@@ -1230,7 +1234,7 @@
 	node->update = upd_create(dict_table_get_n_cols(table), heap);
 
 	node->update_n_fields = dict_table_get_n_cols(table);
-	
+
 	UT_LIST_INIT(node->columns);
 	node->has_clust_rec_x_lock = TRUE;
 	node->cmpl_info = 0;
@@ -1257,7 +1261,7 @@
 	upd_node_t*	node;
 
 	ut_ad(prebuilt && table && prebuilt->trx);
-	
+
 	if (prebuilt->upd_node == NULL) {
 
 		/* Not called before for this handle: create an update node
@@ -1266,7 +1270,7 @@
 		node = row_create_update_node_for_mysql(table, prebuilt->heap);
 
 		prebuilt->upd_node = node;
-		
+
 		prebuilt->upd_graph =
 			que_node_get_parent(
 				pars_complete_graph_for_exec(node,
@@ -1294,7 +1298,7 @@
 	ulint		err;
 	que_thr_t*	thr;
 	ibool		was_lock_wait;
-	dict_index_t*	clust_index; 
+	dict_index_t*	clust_index;
 /*	ulint		ref_len; */
 	upd_node_t*	node;
 	dict_table_t*	table		= prebuilt->table;
@@ -1303,10 +1307,10 @@
 	ut_ad(prebuilt && trx);
 	ut_ad(trx->mysql_thread_id == os_thread_get_curr_id());
 	UT_NOT_USED(mysql_rec);
-	
+
 	if (prebuilt->table->ibd_file_missing) {
-	        ut_print_timestamp(stderr);
-	        fprintf(stderr, "  InnoDB: Error:\n"
+		ut_print_timestamp(stderr);
+		fprintf(stderr, "  InnoDB: Error:\n"
 "InnoDB: MySQL is trying to use a table handle but the .ibd file for\n"
 "InnoDB: table %s does not exist.\n"
 "InnoDB: Have you deleted the .ibd file from the database directory under\n"
@@ -1326,7 +1330,7 @@
 		ut_print_name(stderr, prebuilt->trx, prebuilt->table->name);
 		putc('\n', stderr);
 
-		mem_analyze_corruption((byte*)prebuilt);
+		mem_analyze_corruption(prebuilt);
 
 		ut_error;
 	}
@@ -1359,9 +1363,9 @@
 		btr_pcur_copy_stored_position(node->pcur,
 							prebuilt->clust_pcur);
 	}
-		
+
 	ut_a(node->pcur->rel_pos == BTR_PCUR_ON);
-	 	
+
 	/* MySQL seems to call rnd_pos before updating each row it
 	has cached: we can get the correct cursor position from
 	prebuilt->pcur; NOTE that we cannot build the row reference
@@ -1370,7 +1374,7 @@
 	the row id used as the clustered index key */
 
 	savept = trx_savept_take(trx);
-	
+
 	thr = que_fork_get_first_thr(prebuilt->upd_graph);
 
 	node->state = UPD_NODE_UPDATE_CLUSTERED;
@@ -1389,7 +1393,7 @@
 
 	if (err != DB_SUCCESS) {
 		que_thr_stop_for_mysql(thr);
-		
+
 		if (err == DB_RECORD_NOT_FOUND) {
 			trx->error_state = DB_SUCCESS;
 			trx->op_info = "";
@@ -1397,10 +1401,11 @@
 			return((int) err);
 		}
 
-    thr->lock_state= QUE_THR_LOCK_ROW;
+		thr->lock_state= QUE_THR_LOCK_ROW;
 		was_lock_wait = row_mysql_handle_errors(&err, trx, thr,
-								&savept);
-    thr->lock_state= QUE_THR_LOCK_NOLOCK;;
+			&savept);
+		thr->lock_state= QUE_THR_LOCK_NOLOCK;
+
 		if (was_lock_wait) {
 			goto run_again;
 		}
@@ -1436,9 +1441,8 @@
 really were set. This function removes a newly set lock under prebuilt->pcur,
 and also under prebuilt->clust_pcur. Currently, this is only used and tested
 in the case of an UPDATE or a DELETE statement, where the row lock is of the
-LOCK_X or LOCK_S type. 
-
-Thus, this implements a 'mini-rollback' that releases the latest record 
+LOCK_X type.
+Thus, this implements a 'mini-rollback' that releases the latest record
 locks we set. */
 
 int
@@ -1457,11 +1461,11 @@
 	btr_pcur_t*	clust_pcur	= prebuilt->clust_pcur;
 	trx_t*		trx		= prebuilt->trx;
 	rec_t*		rec;
-	mtr_t           mtr;
-	
+	mtr_t		mtr;
+
 	ut_ad(prebuilt && trx);
 	ut_ad(trx->mysql_thread_id == os_thread_get_curr_id());
-			
+
 	if (!srv_locks_unsafe_for_binlog) {
 
 		fprintf(stderr,
@@ -1475,19 +1479,12 @@
 
 	index = btr_pcur_get_btr_cur(pcur)->index;
 
-	if (UNIV_UNLIKELY(index == NULL)) {
-		fprintf(stderr,
-"InnoDB: Error: Index is not set for persistent cursor.\n");
-		ut_print_buf(stderr, (const byte*)pcur, sizeof(btr_pcur_t));
-		ut_error;
-	}
-
-	if (trx_new_rec_locks_contain(trx, index)) {
+	if (index != NULL && trx_new_rec_locks_contain(trx, index)) {
 
 		mtr_start(&mtr);
-			
+
 		/* Restore the cursor position and find the record */
-		
+
 		if (!has_latches_on_recs) {
 			btr_pcur_restore_position(BTR_SEARCH_LEAF, pcur, &mtr);
 		}
@@ -1504,7 +1501,7 @@
 		garbage! */
 
 		if (index->type & DICT_CLUSTERED) {
-			
+
 			goto func_exit;
 		}
 	}
@@ -1514,7 +1511,7 @@
 	if (index != NULL && trx_new_rec_locks_contain(trx, index)) {
 
 		mtr_start(&mtr);
-			
+
 		/* Restore the cursor position and find the record */
 
 		if (!has_latches_on_recs) {
@@ -1528,10 +1525,10 @@
 
 		mtr_commit(&mtr);
 	}
-			
+
 func_exit:
 	trx->op_info = "";
-	
+
 	return(DB_SUCCESS);
 }
 
@@ -1565,7 +1562,7 @@
 
 	if (err == DB_LOCK_WAIT) {
 		/* Handle lock wait here */
-	
+
 		que_thr_stop_for_mysql(thr);
 
 		srv_suspend_mysql_thread(thr);
@@ -1580,7 +1577,7 @@
 		}
 
 		/* Retry operation after a normal lock wait */
-		
+
 		goto run_again;
 	}
 
@@ -1618,8 +1615,8 @@
 	clust_index = dict_table_get_first_index(table);
 
 	if (dtype_get_mtype(dict_index_get_nth_type(clust_index, 0))
-	 							== DATA_SYS) {
-	 	return(TRUE);
+								== DATA_SYS) {
+		return(TRUE);
 	}
 
 	return(FALSE);
@@ -1673,9 +1670,7 @@
 
 	if (!ptr) {
 		/* table name does not begin with "/rsql" */
-		dict_mem_table_free(table);
 		trx_commit_for_mysql(trx);
-
 		return(DB_ERROR);
 	}
 	else {
@@ -1707,7 +1702,7 @@
 	trx_t*	trx)	/* in: transaction */
 {
 	ut_a(trx->dict_operation_lock_mode == 0);
-	
+
 	rw_lock_s_lock(&dict_operation_lock);
 
 	trx->dict_operation_lock_mode = RW_S_LATCH;
@@ -1738,8 +1733,8 @@
 	trx_t*	trx)	/* in: transaction */
 {
 	ut_a(trx->dict_operation_lock_mode == 0
-	     || trx->dict_operation_lock_mode == RW_X_LATCH);
-	
+		|| trx->dict_operation_lock_mode == RW_X_LATCH);
+
 	/* Serialize data dictionary operations with dictionary mutex:
 	no deadlocks or lock waits can occur then in these operations */
 
@@ -1787,7 +1782,6 @@
 	const char*	table_name;
 	ulint		table_name_len;
 	ulint		err;
-	ulint		i;
 
 	ut_ad(trx->mysql_thread_id == os_thread_get_curr_id());
 #ifdef UNIV_SYNC_DEBUG
@@ -1795,7 +1789,7 @@
 	ut_ad(mutex_own(&(dict_sys->mutex)));
 #endif /* UNIV_SYNC_DEBUG */
 	ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH);
-	
+
 	if (srv_created_new_raw) {
 		fputs(
 		"InnoDB: A new raw disk partition was initialized or\n"
@@ -1805,14 +1799,13 @@
 		"InnoDB: with raw, and innodb_force_... is removed.\n",
 		stderr);
 
-		dict_mem_table_free(table);
 		trx_commit_for_mysql(trx);
 
 		return(DB_ERROR);
 	}
 
 	trx->op_info = "creating table";
-	
+
 	if (row_mysql_is_system_table(table->name)) {
 
 		fprintf(stderr,
@@ -1820,25 +1813,11 @@
     "InnoDB: MySQL system tables must be of the MyISAM type!\n",
 		table->name);
 
-		dict_mem_table_free(table);
 		trx_commit_for_mysql(trx);
 
 		return(DB_ERROR);
 	}
 
-	/* Check that no reserved column names are used. */
-	for (i = 0; i < dict_table_get_n_user_cols(table); i++) {
-		dict_col_t*	col = dict_table_get_nth_col(table, i);
-
-		if (dict_col_name_is_reserved(col->name)) {
-
-			dict_mem_table_free(table);
-			trx_commit_for_mysql(trx);
-
-			return(DB_ERROR);
-		}
-	}
-
 	trx_start_if_not_started(trx);
 
 	if (row_mysql_is_recovered_tmp_table(table->name)) {
@@ -1849,7 +1828,7 @@
 		#sql-table in the tablespace. We have here a special
 		mechanism to recover such tables by renaming them to
 		rsql... */
- 				
+
 		return(row_mysql_recover_tmp_table(table, trx));
 	}
 
@@ -1868,7 +1847,7 @@
 
 		/* Table equals "innodb_monitor":
 		start monitor prints */
- 				
+
 		srv_print_innodb_monitor = TRUE;
 
 		/* The lock timeout monitor thread also takes care
@@ -1897,7 +1876,7 @@
 	} else if (table_name_len == sizeof S_innodb_mem_validate
 			&& !memcmp(table_name, S_innodb_mem_validate,
 				sizeof S_innodb_mem_validate)) {
-	        /* We define here a debugging feature intended for
+		/* We define here a debugging feature intended for
 		developers */
 
 		fputs("Validating InnoDB memory:\n"
@@ -1917,7 +1896,7 @@
 	heap = mem_heap_create(512);
 
 	trx->dict_operation = TRUE;
-	
+
 	node = tab_create_graph_create(table, heap);
 
 	thr = pars_complete_graph_for_exec(node, trx, heap);
@@ -1929,27 +1908,27 @@
 
 	if (err != DB_SUCCESS) {
 		/* We have special error handling here */
-		
+
 		trx->error_state = DB_SUCCESS;
-		
+
 		trx_general_rollback_for_mysql(trx, FALSE, NULL);
 
 		if (err == DB_OUT_OF_FILE_SPACE) {
-	    		ut_print_timestamp(stderr);
+			ut_print_timestamp(stderr);
 
-			fputs("  InnoDB: Warning: cannot create table ", 
+			fputs("  InnoDB: Warning: cannot create table ",
 								stderr);
 			ut_print_name(stderr, trx, table->name);
 			fputs(" because tablespace full\n", stderr);
 
 			if (dict_table_get_low(table->name)) {
 
-		     		row_drop_table_for_mysql(table->name, trx,
+				row_drop_table_for_mysql(table->name, trx,
 								FALSE);
 			}
 
 		} else if (err == DB_DUPLICATE_KEY) {
-	    		ut_print_timestamp(stderr);
+			ut_print_timestamp(stderr);
 
 			fputs("  InnoDB: Error: table ", stderr);
 			ut_print_name(stderr, trx, table->name);
@@ -1967,7 +1946,7 @@
      "InnoDB: http://dev.mysql.com/doc/mysql/en/"
      "InnoDB_troubleshooting_datadict.html\n", stderr);
 		}
-		
+
 		/* We may also get err == DB_ERROR if the .ibd file for the
 		table already exists */
 
@@ -2005,13 +1984,13 @@
 	ulint		err;
 	ulint		i, j;
 	ulint		len;
-	
+
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
 	ut_ad(mutex_own(&(dict_sys->mutex)));
 #endif /* UNIV_SYNC_DEBUG */
 	ut_ad(trx->mysql_thread_id == os_thread_get_curr_id());
-	
+
 	trx->op_info = "creating index";
 
 	trx_start_if_not_started(trx);
@@ -2023,8 +2002,8 @@
 	for (i = 0; i < dict_index_get_n_fields(index); i++) {
 		for (j = 0; j < i; j++) {
 			if (0 == ut_strcmp(
-			      dict_index_get_nth_field(index, j)->name,
-			      dict_index_get_nth_field(index, i)->name)) {
+				    dict_index_get_nth_field(index, j)->name,
+				    dict_index_get_nth_field(index, i)->name)) {
 
 				ut_print_timestamp(stderr);
 
@@ -2042,7 +2021,7 @@
 				goto error_handling;
 			}
 		}
-		
+
 		/* Check also that prefix_len and actual length
 		< DICT_MAX_INDEX_COL_LEN */
 
@@ -2051,7 +2030,7 @@
 		if (field_lengths) {
 			len = ut_max(len, field_lengths[i]);
 		}
-		
+
 		if (len >= DICT_MAX_INDEX_COL_LEN) {
 			err = DB_TOO_BIG_RECORD;
 
@@ -2078,14 +2057,14 @@
 	ut_a(thr == que_fork_start_command(que_node_get_parent(thr)));
 	que_run_threads(thr);
 
- 	err = trx->error_state;
+	err = trx->error_state;
 
 	que_graph_free((que_t*) que_node_get_parent(thr));
 
 error_handling:
 	if (err != DB_SUCCESS) {
 		/* We have special error handling here */
-		
+
 		trx->error_state = DB_SUCCESS;
 
 		trx_general_rollback_for_mysql(trx, FALSE, NULL);
@@ -2094,7 +2073,7 @@
 
 		trx->error_state = DB_SUCCESS;
 	}
-	
+
 	trx->op_info = "";
 
 	return((int) err);
@@ -2133,7 +2112,7 @@
 	ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
 #endif /* UNIV_SYNC_DEBUG */
 	ut_a(sql_string);
-	
+
 	trx->op_info = "adding foreign keys";
 
 	trx_start_if_not_started(trx);
@@ -2155,7 +2134,7 @@
 
 	if (err != DB_SUCCESS) {
 		/* We have special error handling here */
-		
+
 		trx->error_state = DB_SUCCESS;
 
 		trx_general_rollback_for_mysql(trx, FALSE, NULL);
@@ -2197,19 +2176,19 @@
 	ut_print_name(stderr, name);
 	fputs(" in background drop list\n", stderr); */
 
-  	/* Try to drop the table in InnoDB */
+	/* Try to drop the table in InnoDB */
+
+	error = row_drop_table_for_mysql(name, trx, FALSE);
 
-  	error = row_drop_table_for_mysql(name, trx, FALSE);
-  	
 	/* Flush the log to reduce probability that the .frm files and
 	the InnoDB data dictionary get out-of-sync if the user runs
 	with innodb_flush_log_at_trx_commit = 0 */
-	
+
 	log_buffer_flush_to_disk();
 
-  	trx_commit_for_mysql(trx);
+	trx_commit_for_mysql(trx);
 
-  	trx_free_for_background(trx);
+	trx_free_for_background(trx);
 
 	return((int) error);
 }
@@ -2229,7 +2208,7 @@
 	dict_table_t*		table;
 	ulint			n_tables;
 	ulint			n_tables_dropped = 0;
-loop:	
+loop:
 	mutex_enter(&kernel_mutex);
 
 	if (!row_mysql_drop_list_inited) {
@@ -2239,7 +2218,7 @@
 	}
 
 	drop = UT_LIST_GET_FIRST(row_mysql_drop_list);
-	
+
 	n_tables = UT_LIST_GET_LEN(row_mysql_drop_list);
 
 	mutex_exit(&kernel_mutex);
@@ -2255,12 +2234,12 @@
 	mutex_exit(&(dict_sys->mutex));
 
 	if (table == NULL) {
-	        /* If for some reason the table has already been dropped
+		/* If for some reason the table has already been dropped
 		through some other mechanism, do not try to drop it */
 
-	        goto already_dropped;
+		goto already_dropped;
 	}
-							
+
 	if (DB_SUCCESS != row_drop_table_for_mysql_in_background(
 							drop->table_name)) {
 		/* If the DROP fails for some table, we return, and let the
@@ -2276,7 +2255,7 @@
 
 	UT_LIST_REMOVE(row_mysql_drop_list, row_mysql_drop_list, drop);
 
-        ut_print_timestamp(stderr);
+	ut_print_timestamp(stderr);
 	fprintf(stderr,
 		"  InnoDB: Dropped table %s in background drop queue.\n",
 		drop->table_name);
@@ -2308,7 +2287,7 @@
 		UT_LIST_INIT(row_mysql_drop_list);
 		row_mysql_drop_list_inited = TRUE;
 	}
-	
+
 	return(UT_LIST_GET_LEN(row_mysql_drop_list));
 }
 
@@ -2327,7 +2306,7 @@
 	dict_table_t*	table)	/* in: table */
 {
 	row_mysql_drop_t*	drop;
-	
+
 	mutex_enter(&kernel_mutex);
 
 	if (!row_mysql_drop_list_inited) {
@@ -2335,14 +2314,14 @@
 		UT_LIST_INIT(row_mysql_drop_list);
 		row_mysql_drop_list_inited = TRUE;
 	}
-	
+
 	/* Look if the table already is in the drop list */
 	drop = UT_LIST_GET_FIRST(row_mysql_drop_list);
 
 	while (drop != NULL) {
 		if (strcmp(drop->table_name, table->name) == 0) {
 			/* Already in the list */
-			
+
 			mutex_exit(&kernel_mutex);
 
 			return(FALSE);
@@ -2354,9 +2333,9 @@
 	drop = mem_alloc(sizeof(row_mysql_drop_t));
 
 	drop->table_name = mem_strdup(table->name);
- 
+
 	UT_LIST_ADD_LAST(row_mysql_drop_list, row_mysql_drop_list, drop);
-	
+
 /*	fputs("InnoDB: Adding table ", stderr);
 	ut_print_name(stderr, drop->table_name);
 	fputs(" to background drop list\n", stderr); */
@@ -2426,8 +2405,8 @@
 	"FROM SYS_TABLES\n"
 	"WHERE NAME = table_name;\n"
 	"IF (SQL %% NOTFOUND) THEN\n"
-	"	COMMIT WORK;\n"
-	"	RETURN;\n"
+	"       COMMIT WORK;\n"
+	"       RETURN;\n"
 	"END IF;\n"
 	"UPDATE SYS_TABLES SET ID = new_id\n"
 	"WHERE ID = old_id;\n"
@@ -2469,8 +2448,8 @@
 
 	if (table->n_foreign_key_checks_running > 0) {
 
-	        ut_print_timestamp(stderr);
-		fputs("	 InnoDB: You are trying to DISCARD table ", stderr);
+		ut_print_timestamp(stderr);
+		fputs("  InnoDB: You are trying to DISCARD table ", stderr);
 		ut_print_name(stderr, trx, table->name);
 		fputs("\n"
 		 "InnoDB: though there is a foreign key check running on it.\n"
@@ -2486,7 +2465,7 @@
 	some other table (not the table itself) */
 
 	foreign = UT_LIST_GET_FIRST(table->referenced_list);
-	
+
 	while (foreign && foreign->foreign_table == table) {
 		foreign = UT_LIST_GET_NEXT(referenced_list, foreign);
 	}
@@ -2533,7 +2512,7 @@
 	ut_a(graph);
 
 	/* Remove any locks there are on the table or its records */
-	
+
 	lock_reset_all_on_table(table);
 
 	graph->trx = trx;
@@ -2569,14 +2548,14 @@
 			table->ibd_file_missing = TRUE;
 		}
 	}
-funct_exit:	
+funct_exit:
 	row_mysql_unlock_data_dictionary(trx);
 
 	if (graph) {
 		que_graph_free(graph);
 	}
 
-  	trx_commit_for_mysql(trx);
+	trx_commit_for_mysql(trx);
 
 	trx->op_info = "";
 
@@ -2606,7 +2585,7 @@
 	trx->op_info = "importing tablespace";
 
 	current_lsn = log_get_lsn();
-	
+
 	/* It is possible, though very improbable, that the lsn's in the
 	tablespace to be imported have risen above the current system lsn, if
 	a lengthy purge, ibuf merge, or rollback was performed on a backup
@@ -2706,10 +2685,10 @@
 		err = DB_ERROR;
 	}
 
-funct_exit:	
+funct_exit:
 	row_mysql_unlock_data_dictionary(trx);
 
-  	trx_commit_for_mysql(trx);
+	trx_commit_for_mysql(trx);
 
 	trx->op_info = "";
 
@@ -2800,7 +2779,7 @@
 		"InnoDB: database modifications by the user. Shut down\n"
 		"InnoDB: mysqld and edit my.cnf so that newraw is replaced\n"
 		"InnoDB: with raw, and innodb_force_... is removed.\n",
-                stderr);
+		stderr);
 
 		return(DB_ERROR);
 	}
@@ -2862,7 +2841,7 @@
 
 	if (table->n_foreign_key_checks_running > 0) {
 		ut_print_timestamp(stderr);
-		fputs("	 InnoDB: Cannot truncate table ", stderr);
+		fputs("  InnoDB: Cannot truncate table ", stderr);
 		ut_print_name(stderr, trx, table->name);
 		fputs(" by DROP+CREATE\n"
 "InnoDB: because there is a foreign key check running on it.\n",
@@ -2985,7 +2964,7 @@
 		trx_general_rollback_for_mysql(trx, FALSE, NULL);
 		trx->error_state = DB_SUCCESS;
 		ut_print_timestamp(stderr);
-fputs("	 InnoDB: Unable to assign a new identifier to table ", stderr);
+fputs("  InnoDB: Unable to assign a new identifier to table ", stderr);
 		ut_print_name(stderr, trx, table->name);
 		fputs("\n"
 "InnoDB: after truncating it.  Background processes may corrupt the table!\n",
@@ -2998,7 +2977,7 @@
 	dict_table_autoinc_initialize(table, 0);
 	dict_update_statistics(table);
 
-  	trx_commit_for_mysql(trx);
+	trx_commit_for_mysql(trx);
 
 funct_exit:
 
@@ -3060,46 +3039,46 @@
 	"FROM SYS_TABLES\n"
 	"WHERE NAME = table_name;\n"
 	"IF (SQL % NOTFOUND) THEN\n"
-	"	COMMIT WORK;\n"
-	"	RETURN;\n"
+	"       COMMIT WORK;\n"
+	"       RETURN;\n"
 	"END IF;\n"
 	"found := 1;\n"
 	"SELECT ID INTO sys_foreign_id\n"
 	"FROM SYS_TABLES\n"
 	"WHERE NAME = 'SYS_FOREIGN';\n"
 	"IF (SQL % NOTFOUND) THEN\n"
-	"	found := 0;\n"
+	"       found := 0;\n"
 	"END IF;\n"
 	"IF (table_name = 'SYS_FOREIGN') THEN\n"
-	"	found := 0;\n"
+	"       found := 0;\n"
 	"END IF;\n"
 	"IF (table_name = 'SYS_FOREIGN_COLS') THEN\n"
-	"	found := 0;\n"
+	"       found := 0;\n"
 	"END IF;\n"
 	"WHILE found = 1 LOOP\n"
-	"	SELECT ID INTO foreign_id\n"
-	"	FROM SYS_FOREIGN\n"
-	"	WHERE FOR_NAME = table_name\n"
-        "             AND TO_BINARY(FOR_NAME) = TO_BINARY(table_name);\n"
-	"	IF (SQL % NOTFOUND) THEN\n"
-	"		found := 0;\n"
-	"	ELSE"
-	"		DELETE FROM SYS_FOREIGN_COLS WHERE ID = foreign_id;\n"
-	"		DELETE FROM SYS_FOREIGN WHERE ID = foreign_id;\n"
-	"	END IF;\n"
+	"       SELECT ID INTO foreign_id\n"
+	"       FROM SYS_FOREIGN\n"
+	"       WHERE FOR_NAME = table_name\n"
+	"               AND TO_BINARY(FOR_NAME) = TO_BINARY(table_name);\n"
+	"       IF (SQL % NOTFOUND) THEN\n"
+	"               found := 0;\n"
+	"       ELSE"
+	"               DELETE FROM SYS_FOREIGN_COLS WHERE ID = foreign_id;\n"
+	"               DELETE FROM SYS_FOREIGN WHERE ID = foreign_id;\n"
+	"       END IF;\n"
 	"END LOOP;\n"
 	"found := 1;\n"
 	"WHILE found = 1 LOOP\n"
-	"	SELECT ID INTO index_id\n"
-	"	FROM SYS_INDEXES\n"
-	"	WHERE TABLE_ID = table_id;\n"	
-	"	IF (SQL % NOTFOUND) THEN\n"
-	"		found := 0;\n"
-	"	ELSE"
-	"		DELETE FROM SYS_FIELDS WHERE INDEX_ID = index_id;\n"
-	"		DELETE FROM SYS_INDEXES WHERE ID = index_id\n"
-	"					 AND TABLE_ID = table_id;\n"
-	"	END IF;\n"
+	"       SELECT ID INTO index_id\n"
+	"       FROM SYS_INDEXES\n"
+	"       WHERE TABLE_ID = table_id;\n"
+	"       IF (SQL % NOTFOUND) THEN\n"
+	"               found := 0;\n"
+	"       ELSE"
+	"               DELETE FROM SYS_FIELDS WHERE INDEX_ID = index_id;\n"
+	"               DELETE FROM SYS_INDEXES WHERE ID = index_id\n"
+	"                                        AND TABLE_ID = table_id;\n"
+	"       END IF;\n"
 	"END LOOP;\n"
 	"DELETE FROM SYS_COLUMNS WHERE TABLE_ID = table_id;\n"
 	"DELETE FROM SYS_TABLES WHERE ID = table_id;\n"
@@ -3115,7 +3094,7 @@
 		"InnoDB: database modifications by the user. Shut down\n"
 		"InnoDB: mysqld and edit my.cnf so that newraw is replaced\n"
 		"InnoDB: with raw, and innodb_force_... is removed.\n",
-                stderr);
+		stderr);
 
 		return(DB_ERROR);
 	}
@@ -3139,7 +3118,7 @@
 
 		/* Table name equals "innodb_monitor":
 		stop monitor prints */
- 				
+
 		srv_print_innodb_monitor = FALSE;
 		srv_print_innodb_lock_monitor = FALSE;
 	} else if (namelen == sizeof S_innodb_lock_monitor
@@ -3183,7 +3162,7 @@
 	ut_ad(mutex_own(&(dict_sys->mutex)));
 	ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
 #endif /* UNIV_SYNC_DEBUG */
-	
+
 	graph = pars_sql(sql);
 
 	ut_a(graph);
@@ -3198,13 +3177,13 @@
 
 	if (!table) {
 		err = DB_TABLE_NOT_FOUND;
-	    	ut_print_timestamp(stderr);
+		ut_print_timestamp(stderr);
 
 		fputs("  InnoDB: Error: table ", stderr);
 		ut_print_name(stderr, trx, name);
 		fputs(" does not exist in the InnoDB internal\n"
-     	"InnoDB: data dictionary though MySQL is trying to drop it.\n"
-     	"InnoDB: Have you copied the .frm file of the table to the\n"
+	"InnoDB: data dictionary though MySQL is trying to drop it.\n"
+	"InnoDB: Have you copied the .frm file of the table to the\n"
 	"InnoDB: MySQL database directory from another database?\n"
 	"InnoDB: You can look for further help from\n"
 	"InnoDB: http://dev.mysql.com/doc/mysql/en/"
@@ -3216,7 +3195,7 @@
 	some other table (not the table itself) */
 
 	foreign = UT_LIST_GET_FIRST(table->referenced_list);
-	
+
 	while (foreign && foreign->foreign_table == table) {
 	check_next_foreign:
 		foreign = UT_LIST_GET_NEXT(referenced_list, foreign);
@@ -3256,15 +3235,15 @@
 
 		added = row_add_table_to_background_drop_list(table);
 
-	        if (added) {
+		if (added) {
 			ut_print_timestamp(stderr);
-fputs("	 InnoDB: Warning: MySQL is trying to drop table ", stderr);
+fputs("  InnoDB: Warning: MySQL is trying to drop table ", stderr);
 			ut_print_name(stderr, trx, table->name);
 			fputs("\n"
 "InnoDB: though there are still open handles to it.\n"
 "InnoDB: Adding the table to the background drop queue.\n",
 			stderr);
-			
+
 			/* We return DB_SUCCESS to MySQL though the drop will
 			happen lazily later */
 
@@ -3290,8 +3269,8 @@
 		added = row_add_table_to_background_drop_list(table);
 
 		if (added) {
-	        	ut_print_timestamp(stderr);
-fputs("	 InnoDB: You are trying to drop table ", stderr);
+			ut_print_timestamp(stderr);
+fputs("  InnoDB: You are trying to drop table ", stderr);
 			ut_print_name(stderr, trx, table->name);
 			fputs("\n"
 "InnoDB: though there is a foreign key check running on it.\n"
@@ -3309,9 +3288,9 @@
 
 		goto funct_exit;
 	}
-	
+
 	/* Remove any locks there are on the table or its records */
-	
+
 	lock_reset_all_on_table(table);
 
 	trx->dict_operation = TRUE;
@@ -3327,7 +3306,7 @@
 		ut_a(err == DB_OUT_OF_FILE_SPACE);
 
 		err = DB_MUST_GET_MORE_FILE_SPACE;
-		
+
 		row_mysql_handle_errors(&err, trx, thr, NULL);
 
 		ut_error;
@@ -3336,7 +3315,7 @@
 		const char*	name_or_path;
 
 		space_id = table->space;
-		
+
 		if (table->dir_path_of_temp_table != NULL) {
 			dir_path_of_temp_table =
 				mem_strdup(table->dir_path_of_temp_table);
@@ -3370,7 +3349,7 @@
 
 				fprintf(stderr,
 "InnoDB: We removed now the InnoDB internal data dictionary entry\n"
-"InnoDB: of table ");	
+"InnoDB: of table ");
 				ut_print_name(stderr, trx, name);
 				fprintf(stderr, ".\n");
 
@@ -3382,7 +3361,7 @@
 			if (!success) {
 				fprintf(stderr,
 "InnoDB: We removed now the InnoDB internal data dictionary entry\n"
-"InnoDB: of table ");	
+"InnoDB: of table ");
 				ut_print_name(stderr, trx, name);
 				fprintf(stderr, ".\n");
 
@@ -3399,7 +3378,7 @@
 funct_exit:
 
 	if (locked_dictionary) {
-		row_mysql_unlock_data_dictionary(trx);	
+		row_mysql_unlock_data_dictionary(trx);
 	}
 
 	if (dir_path_of_temp_table) {
@@ -3407,8 +3386,8 @@
 	}
 
 	que_graph_free(graph);
-	
-  	trx_commit_for_mysql(trx);
+
+	trx_commit_for_mysql(trx);
 
 	trx->op_info = "";
 
@@ -3429,17 +3408,17 @@
 	const char*	name,	/* in: database name which ends to '/' */
 	trx_t*		trx)	/* in: transaction handle */
 {
-        dict_table_t* table;
+	dict_table_t* table;
 	char*	table_name;
 	int	err	= DB_SUCCESS;
 	ulint	namelen	= strlen(name);
-	
+
 	ut_ad(trx->mysql_thread_id == os_thread_get_curr_id());
 	ut_a(name != NULL);
 	ut_a(name[namelen - 1] == '/');
-	
+
 	trx->op_info = "dropping database";
-	
+
 	trx_start_if_not_started(trx);
 loop:
 	row_mysql_lock_data_dictionary(trx);
@@ -3466,11 +3445,11 @@
 			ut_print_name(stderr, trx, table_name);
 			fputs(".\n", stderr);
 
-		        os_thread_sleep(1000000);
+			os_thread_sleep(1000000);
 
-		        mem_free(table_name);
+			mem_free(table_name);
 
-		        goto loop;
+			goto loop;
 		}
 
 		err = row_drop_table_for_mysql(table_name, trx, TRUE);
@@ -3489,7 +3468,7 @@
 	}
 
 	row_mysql_unlock_data_dictionary(trx);
-	
+
 	trx_commit_for_mysql(trx);
 
 	trx->op_info = "";
@@ -3508,7 +3487,8 @@
 	const char*	name)	/* in: table name in the form
 				'database/tablename' */
 {
-	return(strstr(name, "/#sql") != NULL);
+	/* return(strstr(name, "/#sql") != NULL); */
+	return(strstr(name, "/@0023sql") != NULL);
 }
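
Side note (not part of the patch): the hunk above switches the marker being searched for from "/#sql" to "/@0023sql", presumably because 5.1 stores table names with special characters encoded, so '#' (U+0023) appears as the sequence @0023 in on-disk names; that reading is an assumption, not something the patch states. A trivial standalone version of the check, with an illustrative helper name:

#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for the check in the hunk above. */
static int is_mysql_tmp_table_name(const char* name)
{
	return(strstr(name, "/@0023sql") != NULL);
}

int main(void)
{
	/* A "#sql-..." temporary table shows up as "@0023sql-..." once '#'
	is encoded in the on-disk name. */
	printf("%d\n", is_mysql_tmp_table_name("test/@0023sql-1a2b_3"));	/* 1 */
	printf("%d\n", is_mysql_tmp_table_name("test/t1"));			/* 0 */

	return(0);
}
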
 
 /*************************************************************************
@@ -3564,55 +3544,55 @@
 	"old_t_name_len := LENGTH(old_table_name);\n"
 	"gen_constr_prefix := CONCAT(old_table_name, '_ibfk_');\n"
 	"WHILE found = 1 LOOP\n"
-	"	SELECT ID INTO foreign_id\n"
-	"	FROM SYS_FOREIGN\n"
-	"	WHERE FOR_NAME = old_table_name\n"
-	"	      AND TO_BINARY(FOR_NAME) = TO_BINARY(old_table_name);\n"
-	"	IF (SQL % NOTFOUND) THEN\n"
-	"	 found := 0;\n"
-	"	ELSE\n"
-	"	 UPDATE SYS_FOREIGN\n"
-	"	 SET FOR_NAME = new_table_name\n"
-	"	 WHERE ID = foreign_id;\n"
-	"	 id_len := LENGTH(foreign_id);\n"
-	"	 IF (INSTR(foreign_id, '/') > 0) THEN\n"
-	"	 	IF (INSTR(foreign_id,\n"
-	"				gen_constr_prefix) > 0)\n"
-	"		THEN\n"
-	"		  new_foreign_id :=\n"
-	"		    CONCAT(new_table_name,\n"
-	"			SUBSTR(foreign_id, old_t_name_len,\n"
-	"			      	 id_len - old_t_name_len));\n"
-	"		ELSE\n"
-	"		  new_foreign_id :=\n"
-	"		    CONCAT(new_db_name,\n"
-	"			SUBSTR(foreign_id,\n"
-	"				old_db_name_len,\n"
-	"				 id_len - old_db_name_len));\n"
-	"		END IF;\n"
-	"		UPDATE SYS_FOREIGN\n"
-	"		SET ID = new_foreign_id\n"
-	"		WHERE ID = foreign_id;\n"
-	"		UPDATE SYS_FOREIGN_COLS\n"
-	"		SET ID = new_foreign_id\n"
-	"		WHERE ID = foreign_id;\n"
-	"	 END IF;\n"
-	"	END IF;\n"
+	"       SELECT ID INTO foreign_id\n"
+	"       FROM SYS_FOREIGN\n"
+	"       WHERE FOR_NAME = old_table_name\n"
+	"               AND TO_BINARY(FOR_NAME) = TO_BINARY(old_table_name);\n"
+	"       IF (SQL % NOTFOUND) THEN\n"
+	"        found := 0;\n"
+	"       ELSE\n"
+	"        UPDATE SYS_FOREIGN\n"
+	"        SET FOR_NAME = new_table_name\n"
+	"        WHERE ID = foreign_id;\n"
+	"        id_len := LENGTH(foreign_id);\n"
+	"        IF (INSTR(foreign_id, '/') > 0) THEN\n"
+	"               IF (INSTR(foreign_id,\n"
+	"                               gen_constr_prefix) > 0)\n"
+	"               THEN\n"
+	"                 new_foreign_id :=\n"
+	"                   CONCAT(new_table_name,\n"
+	"                       SUBSTR(foreign_id, old_t_name_len,\n"
+	"                                id_len - old_t_name_len));\n"
+	"               ELSE\n"
+	"                 new_foreign_id :=\n"
+	"                   CONCAT(new_db_name,\n"
+	"                       SUBSTR(foreign_id,\n"
+	"                               old_db_name_len,\n"
+	"                                id_len - old_db_name_len));\n"
+	"               END IF;\n"
+	"               UPDATE SYS_FOREIGN\n"
+	"               SET ID = new_foreign_id\n"
+	"               WHERE ID = foreign_id;\n"
+	"               UPDATE SYS_FOREIGN_COLS\n"
+	"               SET ID = new_foreign_id\n"
+	"               WHERE ID = foreign_id;\n"
+	"        END IF;\n"
+	"       END IF;\n"
 	"END LOOP;\n"
 	"UPDATE SYS_FOREIGN SET REF_NAME = new_table_name\n"
 	"WHERE REF_NAME = old_table_name\n"
-	"      AND TO_BINARY(REF_NAME) = TO_BINARY(old_table_name);\n";
+	"       AND TO_BINARY(REF_NAME) = TO_BINARY(old_table_name);\n";
 	static const char str5[] =
 	"END;\n";
 
 	mem_heap_t*	heap			= NULL;
 	const char**	constraints_to_drop	= NULL;
 	ulint		n_constraints_to_drop	= 0;
-	ibool           recovering_temp_table   = FALSE;
+	ibool		recovering_temp_table	= FALSE;
 	ibool		old_is_tmp, new_is_tmp;
 	ulint		len;
 	ulint		i;
-        ibool		success;
+	ibool		success;
 	/* length of database name; 0 if not renaming to a temporary table */
 	ulint		db_name_len;
 	char*		sql;
@@ -3631,18 +3611,18 @@
 		"InnoDB: with raw, and innodb_force_... is removed.\n",
 		stderr);
 
-  		trx_commit_for_mysql(trx);
+		trx_commit_for_mysql(trx);
 		return(DB_ERROR);
 	}
-	
+
 	if (row_mysql_is_system_table(new_name)) {
-	    	
+
 		fprintf(stderr,
     "InnoDB: Error: trying to create a MySQL system table %s of type InnoDB.\n"
     "InnoDB: MySQL system tables must be of the MyISAM type!\n",
 		new_name);
 
-  		trx_commit_for_mysql(trx);
+		trx_commit_for_mysql(trx);
 		return(DB_ERROR);
 	}
 
@@ -3651,11 +3631,11 @@
 
 	old_is_tmp = row_is_mysql_tmp_table_name(old_name);
 	new_is_tmp = row_is_mysql_tmp_table_name(new_name);
-	
+
 	if (row_mysql_is_recovered_tmp_table(new_name)) {
 
-                recovering_temp_table = TRUE;
-        } else {
+		recovering_temp_table = TRUE;
+	} else {
 		/* Serialize data dictionary operations with dictionary mutex:
 		no deadlocks can occur then in these operations */
 
@@ -3666,30 +3646,30 @@
 
 	if (!table) {
 		err = DB_TABLE_NOT_FOUND;
-	    	ut_print_timestamp(stderr);
+		ut_print_timestamp(stderr);
 
-                fputs("  InnoDB: Error: table ", stderr);
-                ut_print_name(stderr, trx, old_name);
-                fputs(" does not exist in the InnoDB internal\n"
-     	"InnoDB: data dictionary though MySQL is trying to rename the table.\n"
-     	"InnoDB: Have you copied the .frm file of the table to the\n"
+		fputs("  InnoDB: Error: table ", stderr);
+		ut_print_name(stderr, trx, old_name);
+		fputs(" does not exist in the InnoDB internal\n"
+	"InnoDB: data dictionary though MySQL is trying to rename the table.\n"
+	"InnoDB: Have you copied the .frm file of the table to the\n"
 	"InnoDB: MySQL database directory from another database?\n"
 	"InnoDB: You can look for further help from\n"
-        "InnoDB: http://dev.mysql.com/doc/mysql/en/"
+	"InnoDB: http://dev.mysql.com/doc/mysql/en/"
 	"InnoDB_troubleshooting_datadict.html\n", stderr);
 		goto funct_exit;
 	}
 
 	if (table->ibd_file_missing) {
 		err = DB_TABLE_NOT_FOUND;
-	    	ut_print_timestamp(stderr);
+		ut_print_timestamp(stderr);
 
-                fputs("  InnoDB: Error: table ", stderr);
-                ut_print_name(stderr, trx, old_name);
-                fputs(
+		fputs("  InnoDB: Error: table ", stderr);
+		ut_print_name(stderr, trx, old_name);
+		fputs(
 	" does not have an .ibd file in the database directory.\n"
 	"InnoDB: You can look for further help from\n"
-        "InnoDB: http://dev.mysql.com/doc/mysql/en/"
+	"InnoDB: http://dev.mysql.com/doc/mysql/en/"
 	"InnoDB_troubleshooting_datadict.html\n", stderr);
 		goto funct_exit;
 	}
@@ -3708,7 +3688,7 @@
 		the ALTER TABLE contained DROP FOREIGN KEY <foreign key id>.*/
 
 		heap = mem_heap_create(100);
-		
+
 		err = dict_foreign_parse_drop_constraints(heap, trx,
 					table,
 					&n_constraints_to_drop,
@@ -3717,7 +3697,7 @@
 
 			goto funct_exit;
 		}
-		
+
 		/* reserve space for all database names */
 		len += 2 * n_constraints_to_drop
 			* (ut_strlenq(old_name, '\'')
@@ -3764,7 +3744,7 @@
 			sqlend += (sizeof str4a2) - 1;
 			sqlend = ut_memcpyq(sqlend, '\'',
 				old_name, db_name_len);
-                        sqlend = ut_strcpyq(sqlend, '\'',
+			sqlend = ut_strcpyq(sqlend, '\'',
 				constraints_to_drop[i]);
 			memcpy(sqlend, str4a3, (sizeof str4a3) - 1);
 			sqlend += (sizeof str4a3) - 1;
@@ -3781,7 +3761,7 @@
 					constraints_to_drop[i]);
 				memcpy(sqlend, str4a2, (sizeof str4a2) - 1);
 				sqlend += (sizeof str4a2) - 1;
-                        	sqlend = ut_strcpyq(sqlend, '\'',
+				sqlend = ut_strcpyq(sqlend, '\'',
 					constraints_to_drop[i]);
 				memcpy(sqlend, str4a3, (sizeof str4a3) - 1);
 				sqlend += (sizeof str4a3) - 1;
@@ -3797,7 +3777,7 @@
 	sqlend += sizeof str5;
 
 	ut_a(sqlend == sql + len + 1);
-	
+
 	graph = pars_sql(sql);
 
 	ut_a(graph);
@@ -3816,16 +3796,16 @@
 
 	if (err != DB_SUCCESS) {
 		if (err == DB_DUPLICATE_KEY) {
-	    		ut_print_timestamp(stderr);
+			ut_print_timestamp(stderr);
 			fputs(
      "  InnoDB: Error; possible reasons:\n"
      "InnoDB: 1) Table rename would cause two FOREIGN KEY constraints\n"
      "InnoDB: to have the same internal name in case-insensitive comparison.\n"
      "InnoDB: 2) table ", stderr);
-                ut_print_name(stderr, trx, new_name);
-                fputs(" exists in the InnoDB internal data\n"
+		ut_print_name(stderr, trx, new_name);
+		fputs(" exists in the InnoDB internal data\n"
      "InnoDB: dictionary though MySQL is trying rename table ", stderr);
-                ut_print_name(stderr, trx, old_name);
+		ut_print_name(stderr, trx, old_name);
 		fputs(" to it.\n"
      "InnoDB: Have you deleted the .frm file and not used DROP TABLE?\n"
      "InnoDB: You can look for further help from\n"
@@ -3870,7 +3850,7 @@
 
 		/* We only want to switch off some of the type checking in
 		an ALTER, not in a RENAME. */
-		
+
 		err = dict_load_foreigns(new_name,
 			old_is_tmp ? trx->check_foreigns : TRUE);
 
@@ -3904,7 +3884,7 @@
 			trx->error_state = DB_SUCCESS;
 		}
 	}
-funct_exit:	
+funct_exit:
 	if (!recovering_temp_table) {
 		row_mysql_unlock_data_dictionary(trx);
 	}
@@ -3916,8 +3896,8 @@
 	if (UNIV_LIKELY_NULL(heap)) {
 		mem_heap_free(heap);
 	}
-	
-  	trx_commit_for_mysql(trx);
+
+	trx_commit_for_mysql(trx);
 
 	trx->op_info = "";
 
@@ -3955,10 +3935,10 @@
 	*offsets_ = (sizeof offsets_) / sizeof *offsets_;
 
 	*n_rows = 0;
-	
+
 	buf = mem_alloc(UNIV_PAGE_SIZE);
 	heap = mem_heap_create(100);
-	
+
 	/* Make a dummy template in prebuilt, which we will use
 	in scanning the index entries */
 
@@ -3968,7 +3948,7 @@
 	prebuilt->n_template = 0;
 	prebuilt->need_to_access_clustered = FALSE;
 
- 	dtuple_set_n_fields(prebuilt->search_tuple, 0);
+	dtuple_set_n_fields(prebuilt->search_tuple, 0);
 
 	prebuilt->select_lock_type = LOCK_NONE;
 	cnt = 1000;
@@ -3991,11 +3971,11 @@
 	}
 
 	*n_rows = *n_rows + 1;
-	
+
 	/* row_search... returns the index record in buf, record origin offset
 	within buf stored in the first 4 bytes, because we have built a dummy
 	template */
-	
+
 	rec = buf + mach_read_from_4(buf);
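
Side note (not part of the patch): as the comment above explains, with an empty template row_search_for_mysql copies the whole index record into buf and stores the record origin's offset within buf in the first four bytes, which mach_read_from_4 reads back most-significant-byte first. A self-contained sketch of that layout; the byte-order helpers and buffer contents are illustrative:

#include <stdio.h>
#include <string.h>

/* Big-endian (most significant byte first) 4-byte helpers, the convention
mach_read_from_4 follows; the names here are illustrative. */
static void write_be4(unsigned char* p, unsigned long v)
{
	p[0] = (unsigned char)(v >> 24);
	p[1] = (unsigned char)(v >> 16);
	p[2] = (unsigned char)(v >> 8);
	p[3] = (unsigned char)(v);
}

static unsigned long read_be4(const unsigned char* p)
{
	return(((unsigned long) p[0] << 24) | ((unsigned long) p[1] << 16)
		| ((unsigned long) p[2] << 8) | (unsigned long) p[3]);
}

int main(void)
{
	unsigned char	buf[64];
	const char	payload[] = "record-bytes";
	unsigned long	origin = 10;	/* pretend the record origin sits here */
	unsigned char*	rec;

	write_be4(buf, origin);
	memcpy(buf + origin, payload, sizeof payload);

	/* Same pointer arithmetic as "rec = buf + mach_read_from_4(buf)". */
	rec = buf + read_be4(buf);
	printf("%s\n", (char*) rec);	/* prints "record-bytes" */

	return(0);
}
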
 
 	if (prev_entry != NULL) {
@@ -4012,15 +3992,15 @@
 		/* In a unique secondary index we allow equal key values if
 		they contain SQL NULLs */
 
-	        for (i = 0;
-                     i < dict_index_get_n_ordering_defined_by_user(index);
+		for (i = 0;
+		     i < dict_index_get_n_ordering_defined_by_user(index);
 		     i++) {
-	                if (UNIV_SQL_NULL == dfield_get_len(
-                                      dtuple_get_nth_field(prev_entry, i))) {
+			if (UNIV_SQL_NULL == dfield_get_len(
+				    dtuple_get_nth_field(prev_entry, i))) {
 
-                        	contains_null = TRUE;
-	                }
-	        }
+				contains_null = TRUE;
+			}
+		}
 
 		if (cmp > 0) {
 			fputs("InnoDB: index records in a wrong order in ",
@@ -4048,12 +4028,12 @@
 
 	mem_heap_empty(heap);
 	offsets = offsets_;
-	
+
 	prev_entry = row_rec_to_index_entry(ROW_COPY_DATA, index, rec, heap);
 
 	ret = row_search_for_mysql(buf, PAGE_CUR_G, prebuilt, 0, ROW_SEL_NEXT);
 
-	goto loop;	
+	goto loop;
 }
 
 /*************************************************************************
@@ -4070,12 +4050,12 @@
 	dict_index_t*	index;
 	ulint		n_rows;
 	ulint		n_rows_in_table	= ULINT_UNDEFINED;
-	ulint		ret 		= DB_SUCCESS;
+	ulint		ret		= DB_SUCCESS;
 	ulint		old_isolation_level;
 
 	if (prebuilt->table->ibd_file_missing) {
-	        ut_print_timestamp(stderr);
-	        fprintf(stderr, "  InnoDB: Error:\n"
+		ut_print_timestamp(stderr);
+		fprintf(stderr, "  InnoDB: Error:\n"
 "InnoDB: MySQL is trying to use a table handle but the .ibd file for\n"
 "InnoDB: table %s does not exist.\n"
 "InnoDB: Have you deleted the .ibd file from the database directory under\n"
@@ -4109,7 +4089,7 @@
 		/* fputs("Validating index ", stderr);
 		ut_print_name(stderr, index->name);
 		putc('\n', stderr); */
-	
+
 		if (!btr_validate_tree(index->tree, prebuilt->trx)) {
 			ret = DB_ERROR;
 		} else {
@@ -4130,7 +4110,7 @@
 			} else if (n_rows != n_rows_in_table) {
 
 				ret = DB_ERROR;
- 
+
 				fputs("Error: ", stderr);
 				dict_index_name_print(stderr,
 					prebuilt->trx, index);
@@ -4146,7 +4126,7 @@
 
 	/* Restore the original isolation level */
 	prebuilt->trx->isolation_level = old_isolation_level;
-	
+
 	/* We validate also the whole adaptive hash index for all tables
 	at every CHECK TABLE */
 