List:Commits« Previous MessageNext Message »
From:Mattias Jonsson Date:May 15 2012 11:29pm
Subject:bzr push into mysql-trunk branch (mattias.jonsson:3830 to 3831) WL#4443
View as plain text  
 3831 Mattias Jonsson	2012-05-16
      WL#4443:
      Added a check of trigger columns vs
      partitioning columns to determine whether
      lock pruning is possible.
      
      TODO: Fix multi update with triggers on two tables.

    modified:
      mysql-test/collections/mysql-trunk-wl4443.push
      mysql-test/r/partition_locking.result
      mysql-test/t/partition_locking.test
      sql/partition_info.cc
      sql/sql_trigger.cc
      sql/sql_trigger.h
      sql/sql_update.cc
 3830 Mattias Jonsson	2012-05-15
      WL#4443:
      Fix for crashes when opening trigger tables fails.
      
      May need more work, but this fixes at least some crashes
      found in RQG.

    modified:
      mysql-test/r/partition_locking.result
      mysql-test/t/partition_locking.test
      sql/sql_base.cc
=== modified file 'mysql-test/collections/mysql-trunk-wl4443.push'
--- a/mysql-test/collections/mysql-trunk-wl4443.push	revid:mattias.jonsson@stripped
+++ b/mysql-test/collections/mysql-trunk-wl4443.push	revid:mattias.jonsson@stripped
@@ -7,9 +7,9 @@
 #perl mysql-test-run.pl --force --timer  --debug-server --parallel=auto --experimental=collections/default.experimental --comment=ps-debug  --vardir=var-ps-debug  --ps-protocol --skip-test-list=collections/disabled-daily.list
 
 # From default.push
-perl mysql-test-run.pl --timer --force --parallel=auto --comment=n_mix --vardir=var-n_mix --mysqld=--binlog-format=mixed --experimental=collections/default.experimental --skip-ndb --skip-test-list=collections/disabled-per-push.list --unit-tests
-perl mysql-test-run.pl --timer --force --parallel=auto --comment=main_ps_row --vardir=var-main-ps_row --suite=main,parts --ps-protocol --mysqld=--binlog-format=row --experimental=collections/default.experimental --skip-ndb --skip-test-list=collections/disabled-per-push.list
-perl mysql-test-run.pl --timer --force --parallel=auto --comment=main_embedded --vardir=var-main_emebbed  --suite=main,parts --embedded --experimental=collections/default.experimental --skip-ndb
+perl mysql-test-run.pl --timer --force --parallel=auto --comment=n_mix --vardir=var-n_mix --suite=main,sys_vars,binlog,federated,rpl,innodb,innodb_fts,perfschema,funcs_1,opt_trace,parts --mysqld=--binlog-format=mixed --experimental=collections/default.experimental --skip-ndb --skip-test-list=collections/disabled-per-push.list --unit-tests
+perl mysql-test-run.pl --timer --force --parallel=auto --comment=main-parts_ps_row --vardir=var-main-ps_row --suite=main,parts --ps-protocol --mysqld=--binlog-format=row --experimental=collections/default.experimental --skip-ndb --skip-test-list=collections/disabled-per-push.list
+perl mysql-test-run.pl --timer --force --parallel=auto --comment=main-parts_embedded --vardir=var-main_emebbed  --suite=main,parts --embedded --experimental=collections/default.experimental --skip-ndb
 # Partition specific
 #perl mysql-test-run.pl --force --timer  --debug-server --parallel=auto --experimental=collections/default.experimental --suite=parts --comment=parts-n_mix-debug  --vardir=var-n_mix-debug  --mysqld=--binlog-format=mixed --skip-test-list=collections/disabled-daily.list
 #perl mysql-test-run.pl --force --timer  --debug-server --parallel=auto --experimental=collections/default.experimental --suite=parts --comment=parts-row-debug  --vardir=var-row-debug  --mysqld=--binlog-format=row --skip-test-list=collections/disabled-daily.list
@@ -31,13 +31,13 @@ perl mysql-test-run.pl --timer --force -
 #perl mysql-test-run.pl --force --timer --debug-server --parallel=auto --experimental=collections/default.experimental --comment=rpl_crash_safe_master-debug --vardir=var-rpl_crash_safe_master-debug --suite=rpl --mysqld=--master-info-repository=TABLE --skip-test-list=collections/disabled-daily.list
 
 # Additional modes for rpl. Multi-Threaded Slave
-perl mysql-test-run.pl --timer --force --parallel=auto --comment=rpl_binlog_n_mix_MTS --vardir=var-mts-rpl-binlog-n_mix --mysqld=--binlog-format=mixed --experimental=collections/default.experimental --skip-ndb  --unit-tests --mysqld=--slave-parallel-workers=4 --mysqld=--slave-transaction-retries=0 --suite=rpl
-perl mysql-test-run.pl --timer --force --parallel=auto --comment=rpl_binlog_ps_row_MTS --vardir=var-mts-rpl-binlog-ps_row --ps-protocol --mysqld=--binlog-format=row --experimental=collections/default.experimental --skip-ndb  --mysqld=--slave-parallel-workers=4 --mysqld=--slave-transaction-retries=0 --suite=rpl
-perl mysql-test-run.pl --timer --force --parallel=auto --comment=rpl_binlog_stmt_MTS --vardir=var-mts-rpl-binlog-stmt --mysqld=--binlog-format=statement --experimental=collections/default.experimental --skip-ndb  --mysqld=--slave-parallel-workers=4 --mysqld=--slave-transaction-retries=0 --suite=rpl
+perl mysql-test-run.pl --timer --force --parallel=auto --comment=rpl_binlog_n_mix_MTS --vardir=var-mts-rpl-binlog-n_mix --mysqld=--binlog-format=mixed --experimental=collections/default.experimental --skip-ndb  --unit-tests --mysqld=--slave-parallel-workers=4 --mysqld=--slave-transaction-retries=0 --suite=rpl,parts
+perl mysql-test-run.pl --timer --force --parallel=auto --comment=rpl_binlog_ps_row_MTS --vardir=var-mts-rpl-binlog-ps_row --ps-protocol --mysqld=--binlog-format=row --experimental=collections/default.experimental --skip-ndb  --mysqld=--slave-parallel-workers=4 --mysqld=--slave-transaction-retries=0 --suite=rpl,parts
+perl mysql-test-run.pl --timer --force --parallel=auto --comment=rpl_binlog_stmt_MTS --vardir=var-mts-rpl-binlog-stmt --mysqld=--binlog-format=statement --experimental=collections/default.experimental --skip-ndb  --mysqld=--slave-parallel-workers=4 --mysqld=--slave-transaction-retries=0 --suite=rpl,parts
 
 # Additional runs for innodb-page-size=4k and 8k
-perl mysql-test-run.pl --timer --force --parallel=auto --comment=n_mix_4k_size --vardir=var-n_mix --mysqld=--binlog-format=mixed --experimental=collections/default.experimental --skip-ndb --skip-test-list=collections/disabled-per-push.list --mysqld=--innodb-page-size=4k --skip-test=innodb_ignore_builtin
-perl mysql-test-run.pl --timer --force --parallel=auto --comment=n_mix_8k_size --vardir=var-n_mix --mysqld=--binlog-format=mixed --experimental=collections/default.experimental --skip-ndb --skip-test-list=collections/disabled-per-push.list --mysqld=--innodb-page-size=8k --skip-test=innodb_ignore_builtin
+perl mysql-test-run.pl --timer --force --parallel=auto --comment=n_mix_4k_size --vardir=var-n_mix --suite=main,sys_vars,binlog,federated,rpl,innodb,innodb_fts,perfschema,funcs_1,opt_trace,parts --mysqld=--binlog-format=mixed --experimental=collections/default.experimental --skip-ndb --skip-test-list=collections/disabled-per-push.list --mysqld=--innodb-page-size=4k --skip-test=innodb_ignore_builtin
+perl mysql-test-run.pl --timer --force --parallel=auto --comment=n_mix_8k_size --vardir=var-n_mix --suite=main,sys_vars,binlog,federated,rpl,innodb,innodb_fts,perfschema,funcs_1,opt_trace,parts --mysqld=--binlog-format=mixed --experimental=collections/default.experimental --skip-ndb --skip-test-list=collections/disabled-per-push.list --mysqld=--innodb-page-size=8k --skip-test=innodb_ignore_builtin
 
 #Engine independent tests
 #perl mysql-test-run.pl --timer --force --debug-server --parallel=auto --comment=eits-rpl-binlog-row-tests-myisam-engine-debug --experimental=collections/default.experimental --vardir=var-binlog-row-eits-tests-myisam-engine-debug --suite=engines/iuds,engines/funcs --suite-timeout=500 --max-test-fail=0 --retry-failure=0 --mysqld=--default-storage-engine=myisam --do-test=rpl --mysqld=--binlog-format=row --skip-test-list=collections/disabled-daily.list

=== modified file 'mysql-test/r/partition_locking.result'
--- a/mysql-test/r/partition_locking.result	revid:mattias.jonsson@stripped
+++ b/mysql-test/r/partition_locking.result	revid:mattias.jonsson@stripped
@@ -82,6 +82,32 @@ HANDLER_WRITE	18
 # Auto increment value is not known until write.
 # 28 locks (table + 13 partition lock/unlock)
 # 1 commit
+FLUSH STATUS;
+INSERT INTO t2 VALUES (10, "First row, p10");
+SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS
+WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
+VARIABLE_NAME	VARIABLE_VALUE
+HANDLER_COMMIT	1
+HANDLER_EXTERNAL_LOCK	28
+HANDLER_WRITE	18
+# Insert pruning on tables with auto increment is not yet supported
+# 28 locks (table + 13 partition lock/unlock)
+# 1 commit
+#
+# UPDATE with auto increment, lock pruning
+#
+FLUSH STATUS;
+UPDATE t2 SET b = CONCAT(b, ", UPDATED") WHERE a = 10;
+SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS
+WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
+VARIABLE_NAME	VARIABLE_VALUE
+HANDLER_COMMIT	1
+HANDLER_EXTERNAL_LOCK	4
+HANDLER_READ_KEY	1
+HANDLER_UPDATE	1
+HANDLER_WRITE	17
+# 4 locks (table + 1 partition lock/unlock)
+# 1 read_key + 1 update + 1 commit
 #
 # Test of pruning with secondary column auto_inc INSERT
 #
@@ -4012,7 +4038,7 @@ a	b
 # Test triggers
 # Tables used in triggers cannot be pruned for locks.
 # Tables with triggers cannot be pruned for locks if
-# BEFORE INSERT/UPDATE trigger exists.
+# BEFORE INSERT/UPDATE trigger uses any partitioning columns.
 #
 CREATE TABLE t3
 (old_a int,
@@ -4250,53 +4276,6 @@ HANDLER_READ_KEY	1
 HANDLER_WRITE	19
 # 28 locks (3 tables, 1 + 5 + 5 partitions)
 # t1, t3 before delete trigger, t3 after delete trigger
-INSERT INTO t2 VALUES (1, "First row, p1");
-INSERT INTO t2 VALUES (3, "First row, p3");
-CREATE TRIGGER t2_before_insert BEFORE INSERT
-ON t2 FOR EACH ROW
-SET NEW.a = NEW.a + 1;
-CREATE TRIGGER t2_before_update BEFORE UPDATE
-ON t2 FOR EACH ROW
-SET NEW.a = NEW.a - 1;
-FLUSH STATUS;
-INSERT INTO t2 VALUES (1, "Second row, p1");
-SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS
-WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
-VARIABLE_NAME	VARIABLE_VALUE
-HANDLER_COMMIT	1
-HANDLER_EXTERNAL_LOCK	28
-HANDLER_WRITE	18
-# 28 locks (1 tables, 13 partitions lock/unlock)
-SELECT * FROM t2 ORDER BY a;
-a	b
-1	First row, p1
-2	Second row, p1
-3	First row, p3
-4	First row, p4, t1.b:First row, p4
-FLUSH STATUS;
-UPDATE t2 SET b = CONCAT(b, ", UPDATED3") WHERE a = 4;
-ERROR 23000: Duplicate entry '3' for key 'PRIMARY'
-SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS
-WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
-VARIABLE_NAME	VARIABLE_VALUE
-HANDLER_EXTERNAL_LOCK	28
-HANDLER_READ_KEY	2
-HANDLER_READ_RND	1
-HANDLER_ROLLBACK	1
-HANDLER_WRITE	18
-# 28 locks (1 tables, 13 partitions lock/unlock)
-FLUSH STATUS;
-UPDATE t2 SET b = CONCAT(b, ", UPDATED3") WHERE a = 1;
-SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS
-WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
-VARIABLE_NAME	VARIABLE_VALUE
-HANDLER_COMMIT	1
-HANDLER_DELETE	1
-HANDLER_EXTERNAL_LOCK	28
-HANDLER_READ_KEY	2
-HANDLER_READ_RND	1
-HANDLER_WRITE	18
-# 28 locks (1 tables, 13 partitions lock/unlock)
 SELECT * FROM t1 ORDER BY a;
 a	b
 0	First row, p0 REPLACED4
@@ -4321,9 +4300,6 @@ a	b
 313	Test313
 SELECT * FROM t2 ORDER BY a;
 a	b
-0	First row, p1, UPDATED3
-2	Second row, p1
-3	First row, p3
 4	First row, p4, t1.b:First row, p4
 SELECT * FROM t3 ORDER BY new_a;
 old_a	new_a	old_b	new_b
@@ -4346,6 +4322,350 @@ old_a	new_a	old_b	new_b
 3	3	BU: First row, p3, same as min(a) + 2 in t2	BU: First row, p3, same as min(a) + 2 in t2, UPDATED2
 98	NULL	AD: Test98	NULL
 98	NULL	BD: Test98	NULL
+TRUNCATE TABLE t1;
+DROP TRIGGER t1_before_insert;
+DROP TRIGGER t1_before_update;
+DROP TRIGGER t1_before_delete;
+DROP TRIGGER t1_after_insert;
+DROP TRIGGER t1_after_update;
+DROP TRIGGER t1_after_delete;
+#
+# Test BEFORE INSERT TRIGGER depending on partitioning column
+#
+CREATE TRIGGER t1_before_insert BEFORE INSERT
+ON t1 FOR EACH ROW
+SET NEW.b = CONCAT("b: ", NEW.b, " a: ", NEW.a);
+FLUSH STATUS;
+INSERT INTO t1 VALUES (0, "first row, p0");
+SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS
+WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
+VARIABLE_NAME	VARIABLE_VALUE
+HANDLER_COMMIT	1
+HANDLER_EXTERNAL_LOCK	28
+HANDLER_WRITE	18
+# 28 locks (1 tables, 13 partitions lock/unlock)
+FLUSH STATUS;
+INSERT INTO t1 VALUES (0, "Second row, p0")
+ON DUPLICATE KEY UPDATE b = CONCAT(b, ", duplicate key");
+SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS
+WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
+VARIABLE_NAME	VARIABLE_VALUE
+HANDLER_COMMIT	1
+HANDLER_EXTERNAL_LOCK	28
+HANDLER_READ_KEY	1
+HANDLER_UPDATE	1
+HANDLER_WRITE	18
+# 28 locks (1 tables, 13 partitions lock/unlock)
+FLUSH STATUS;
+UPDATE t1 SET b = CONCAT(b, ", Updated") WHERE a = 0;
+SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS
+WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
+VARIABLE_NAME	VARIABLE_VALUE
+HANDLER_COMMIT	1
+HANDLER_EXTERNAL_LOCK	4
+HANDLER_READ_KEY	1
+HANDLER_UPDATE	1
+HANDLER_WRITE	17
+# 4 locks (1 tables, 1 partitions lock/unlock)
+FLUSH STATUS;
+UPDATE t1 SET a = 1, b = CONCAT(b, ", a was 0") WHERE a = 0;
+SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS
+WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
+VARIABLE_NAME	VARIABLE_VALUE
+HANDLER_COMMIT	1
+HANDLER_DELETE	1
+HANDLER_EXTERNAL_LOCK	28
+HANDLER_READ_KEY	2
+HANDLER_READ_RND	1
+HANDLER_WRITE	18
+# Updating partitioning column, no lock pruning
+# 28 locks (1 tables, 13 partitions lock/unlock)
+#
+# Test BEFORE INSERT TRIGGER not depending on partitioning column
+#
+DROP TRIGGER t1_before_insert;
+CREATE TRIGGER t1_before_insert BEFORE INSERT
+ON t1 FOR EACH ROW
+SET NEW.b = CONCAT("b: ", NEW.b);
+FLUSH STATUS;
+INSERT INTO t1 VALUES (0, "first row, p0");
+SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS
+WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
+VARIABLE_NAME	VARIABLE_VALUE
+HANDLER_COMMIT	1
+HANDLER_EXTERNAL_LOCK	4
+HANDLER_WRITE	18
+# 4 locks (1 tables, 1 partitions lock/unlock)
+FLUSH STATUS;
+INSERT INTO t1 VALUES (0, "Second row, p0")
+ON DUPLICATE KEY UPDATE b = CONCAT(b, ", duplicate key");
+SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS
+WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
+VARIABLE_NAME	VARIABLE_VALUE
+HANDLER_COMMIT	1
+HANDLER_EXTERNAL_LOCK	4
+HANDLER_READ_KEY	1
+HANDLER_UPDATE	1
+HANDLER_WRITE	18
+# 4 locks (1 tables, 1 partitions lock/unlock)
+FLUSH STATUS;
+UPDATE t1 SET b = CONCAT(b, ", Updated") WHERE a = 0;
+SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS
+WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
+VARIABLE_NAME	VARIABLE_VALUE
+HANDLER_COMMIT	1
+HANDLER_EXTERNAL_LOCK	4
+HANDLER_READ_KEY	1
+HANDLER_UPDATE	1
+HANDLER_WRITE	17
+# 4 locks (1 tables, 1 partitions lock/unlock)
+FLUSH STATUS;
+UPDATE t1 SET a = 2, b = CONCAT(b, ", a was 0") WHERE a = 0;
+SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS
+WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
+VARIABLE_NAME	VARIABLE_VALUE
+HANDLER_COMMIT	1
+HANDLER_DELETE	1
+HANDLER_EXTERNAL_LOCK	28
+HANDLER_READ_KEY	2
+HANDLER_READ_RND	1
+HANDLER_WRITE	18
+# Updating partitioning column, no lock pruning
+# 28 locks (1 tables, 13 partitions lock/unlock)
+#
+# Test BEFORE UPDATE TRIGGER OLD depending on partitioning column.
+# Note that it does not update any partitioning column.
+#
+CREATE TRIGGER t1_before_update BEFORE UPDATE
+ON t1 FOR EACH ROW
+SET NEW.b = CONCAT("old a: ", OLD.a, " new b: ", NEW.b);
+FLUSH STATUS;
+INSERT INTO t1 VALUES (0, "1st p0");
+SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS
+WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
+VARIABLE_NAME	VARIABLE_VALUE
+HANDLER_COMMIT	1
+HANDLER_EXTERNAL_LOCK	4
+HANDLER_WRITE	18
+# 4 locks (1 tables, 1 partitions lock/unlock)
+FLUSH STATUS;
+INSERT INTO t1 VALUES (0, "2nd p0")
+ON DUPLICATE KEY UPDATE b = CONCAT(b, ", dup key");
+SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS
+WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
+VARIABLE_NAME	VARIABLE_VALUE
+HANDLER_COMMIT	1
+HANDLER_EXTERNAL_LOCK	28
+HANDLER_READ_KEY	1
+HANDLER_UPDATE	1
+HANDLER_WRITE	18
+# 28 locks (1 tables, 13 partitions lock/unlock)
+FLUSH STATUS;
+UPDATE t1 SET b = CONCAT(b, ", Updated") WHERE a = 0;
+SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS
+WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
+VARIABLE_NAME	VARIABLE_VALUE
+HANDLER_COMMIT	1
+HANDLER_EXTERNAL_LOCK	4
+HANDLER_READ_KEY	1
+HANDLER_UPDATE	1
+HANDLER_WRITE	17
+# Lock pruning possible!
+# 4 locks (1 tables, 1 partitions lock/unlock)
+FLUSH STATUS;
+UPDATE t1 SET a = 3, b = CONCAT(b, ", a was 0") WHERE a = 0;
+SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS
+WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
+VARIABLE_NAME	VARIABLE_VALUE
+HANDLER_COMMIT	1
+HANDLER_DELETE	1
+HANDLER_EXTERNAL_LOCK	28
+HANDLER_READ_KEY	2
+HANDLER_READ_RND	1
+HANDLER_WRITE	18
+# Updating partitioning column, no lock pruning
+# 28 locks (1 tables, 13 partitions lock/unlock)
+#
+# Test BEFORE UPDATE TRIGGER NEW depending on partitioning column.
+# Note that it does not update any partitioning column.
+#
+DROP TRIGGER t1_before_update;
+CREATE TRIGGER t1_before_update BEFORE UPDATE
+ON t1 FOR EACH ROW
+SET NEW.b = CONCAT("new a: ", NEW.a, " new b: ", NEW.b);
+FLUSH STATUS;
+INSERT INTO t1 VALUES (0, "1st p0");
+SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS
+WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
+VARIABLE_NAME	VARIABLE_VALUE
+HANDLER_COMMIT	1
+HANDLER_EXTERNAL_LOCK	4
+HANDLER_WRITE	18
+# 4 locks (1 tables, 1 partitions lock/unlock)
+FLUSH STATUS;
+INSERT INTO t1 VALUES (0, "2nd p0")
+ON DUPLICATE KEY UPDATE b = CONCAT(b, ", dup key");
+SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS
+WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
+VARIABLE_NAME	VARIABLE_VALUE
+HANDLER_COMMIT	1
+HANDLER_EXTERNAL_LOCK	28
+HANDLER_READ_KEY	1
+HANDLER_UPDATE	1
+HANDLER_WRITE	18
+# 28 locks (1 tables, 13 partitions lock/unlock)
+FLUSH STATUS;
+UPDATE t1 SET b = CONCAT(b, ", Updated") WHERE a = 0;
+SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS
+WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
+VARIABLE_NAME	VARIABLE_VALUE
+HANDLER_COMMIT	1
+HANDLER_EXTERNAL_LOCK	28
+HANDLER_READ_KEY	2
+HANDLER_READ_RND	1
+HANDLER_UPDATE	1
+HANDLER_WRITE	17
+# 28 locks (1 tables, 13 partitions lock/unlock)
+FLUSH STATUS;
+UPDATE t1 SET a = 4, b = CONCAT(b, ", a was 0") WHERE a = 0;
+SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS
+WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
+VARIABLE_NAME	VARIABLE_VALUE
+HANDLER_COMMIT	1
+HANDLER_DELETE	1
+HANDLER_EXTERNAL_LOCK	28
+HANDLER_READ_KEY	2
+HANDLER_READ_RND	1
+HANDLER_WRITE	18
+# Updating partitioning column, no lock pruning
+# 28 locks (1 tables, 13 partitions lock/unlock)
+#
+# Test BEFORE UPDATE TRIGGER not depending on partitioning column
+#
+DROP TRIGGER t1_before_update;
+CREATE TRIGGER t1_before_update BEFORE UPDATE
+ON t1 FOR EACH ROW
+SET NEW.b = CONCAT("new b: ", NEW.b);
+FLUSH STATUS;
+INSERT INTO t1 VALUES (0, "1st p0");
+SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS
+WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
+VARIABLE_NAME	VARIABLE_VALUE
+HANDLER_COMMIT	1
+HANDLER_EXTERNAL_LOCK	4
+HANDLER_WRITE	18
+# 4 locks (1 tables, 1 partitions lock/unlock)
+FLUSH STATUS;
+INSERT INTO t1 VALUES (0, "2nd p0")
+ON DUPLICATE KEY UPDATE b = CONCAT(b, ", dup key");
+SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS
+WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
+VARIABLE_NAME	VARIABLE_VALUE
+HANDLER_COMMIT	1
+HANDLER_EXTERNAL_LOCK	4
+HANDLER_READ_KEY	1
+HANDLER_UPDATE	1
+HANDLER_WRITE	18
+# 4 locks (1 tables, 1 partitions lock/unlock)
+FLUSH STATUS;
+UPDATE t1 SET b = CONCAT(b, ", Updated") WHERE a = 0;
+SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS
+WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
+VARIABLE_NAME	VARIABLE_VALUE
+HANDLER_COMMIT	1
+HANDLER_EXTERNAL_LOCK	4
+HANDLER_READ_KEY	1
+HANDLER_UPDATE	1
+HANDLER_WRITE	17
+# 4 locks (1 tables, 1 partitions lock/unlock)
+FLUSH STATUS;
+UPDATE t1 SET a = 5, b = CONCAT(b, ", a was 0") WHERE a = 0;
+SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS
+WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
+VARIABLE_NAME	VARIABLE_VALUE
+HANDLER_COMMIT	1
+HANDLER_DELETE	1
+HANDLER_EXTERNAL_LOCK	28
+HANDLER_READ_KEY	2
+HANDLER_READ_RND	1
+HANDLER_WRITE	18
+# Updating partitioning column, no lock pruning
+# 28 locks (1 tables, 13 partitions lock/unlock)
+SELECT * FROM t1 ORDER BY a;
+a	b
+1	b: first row, p0 a: 0, duplicate key, Updated, a was 0
+2	b: first row, p0, duplicate key, Updated, a was 0
+3	old a: 0 new b: old a: 0 new b: old a: 0 new b: b: 1st p0, dup key, Updated, a was 0
+4	new a: 4 new b: new a: 0 new b: new a: 0 new b: b: 1st p0, dup key, Updated, a was 0
+5	new b: new b: new b: b: 1st p0, dup key, Updated, a was 0
+#
+# Test of BEFORE UPDATE triggers and multi UPDATE
+#
+DROP TRIGGER t1_before_insert;
+DROP TRIGGER t1_before_update;
+TRUNCATE TABLE t1;
+TRUNCATE TABLE t2;
+INSERT INTO t1 VALUES (1, "MultiUpdate1");
+INSERT INTO t1 VALUES (2, "MultiUpdate2");
+INSERT INTO t2 VALUES (1, "MultiUpdate1");
+INSERT INTO t2 VALUES (2, "MultiUpdate2");
+CREATE TRIGGER t1_before_update BEFORE UPDATE
+ON t1 FOR EACH ROW
+SET NEW.b = CONCAT("new1 b: ", NEW.b);
+CREATE TRIGGER t2_before_update BEFORE UPDATE
+ON t2 FOR EACH ROW
+SET NEW.b = CONCAT("new2 a: ", NEW.a, " new2 b: ", NEW.b);
+FLUSH STATUS;
+UPDATE t1, t2
+SET t1.b = CONCAT(t1.b, ",(1) t2.b:", t2.b),
+t2.b = CONCAT(t2.b, ",(1) t1.b:", t1.b)
+WHERE t2.b = t1.b and t1.a = 1;
+SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS
+WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
+VARIABLE_NAME	VARIABLE_VALUE
+HANDLER_COMMIT	1
+HANDLER_EXTERNAL_LOCK	32
+HANDLER_READ_FIRST	13
+HANDLER_READ_KEY	15
+HANDLER_READ_RND	1
+HANDLER_READ_RND_NEXT	17
+HANDLER_UPDATE	2
+HANDLER_WRITE	18
+# 32 locks (2 table, 13 + 1 partitions lock/unlock)
+# 15 read_key
+# 1 read_next, read_rnd
+# 2 read_rnd_next
+# 2 update
+FLUSH STATUS;
+UPDATE t1, t2
+SET t1.b = CONCAT(t1.b, ",(2) t2.b:", t2.b),
+t2.b = CONCAT(t2.b, ",(2) t1.b:", t1.b)
+WHERE t1.b = t2.b and t2.a = 2;
+SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS
+WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
+VARIABLE_NAME	VARIABLE_VALUE
+HANDLER_COMMIT	1
+HANDLER_EXTERNAL_LOCK	32
+HANDLER_READ_KEY	15
+HANDLER_READ_NEXT	1
+HANDLER_READ_RND	1
+HANDLER_READ_RND_NEXT	2
+HANDLER_UPDATE	2
+HANDLER_WRITE	18
+# 32 locks (2 table, 13 + 1 partitions lock/unlock)
+# 15 read_key
+# 1 read_next, read_rnd
+# 2 read_rnd_next
+# 2 update
+SELECT * FROM t1 ORDER BY a;
+a	b
+1	new1 b: MultiUpdate1,(1) t2.b:MultiUpdate1
+2	new1 b: MultiUpdate2,(2) t2.b:MultiUpdate2
+# TODO: FIXME: Why is the first row in t2 also affected by t1 trigger?
+SELECT * FROM t2 ORDER BY a;
+a	b
+1	new2 a: 1 new2 b: MultiUpdate1,(1) t1.b:new1 b: MultiUpdate1,(1) t2.b:MultiUpdate1
+2	new2 a: 2 new2 b: MultiUpdate2,(2) t1.b:MultiUpdate2
 DROP TABLE t1, t2, t3;
 #
 # DO is not supported by WL#4443 !!!

=== modified file 'mysql-test/t/partition_locking.test'
--- a/mysql-test/t/partition_locking.test	revid:mattias.jonsson@stripped
+++ b/mysql-test/t/partition_locking.test	revid:mattias.jonsson@stripped
@@ -73,6 +73,23 @@ eval $get_handler_status_counts;
 --echo # 28 locks (table + 13 partition lock/unlock)
 --echo # 1 commit
 
+FLUSH STATUS;
+INSERT INTO t2 VALUES (10, "First row, p10");
+eval $get_handler_status_counts;
+--echo # Insert pruning on tables with auto increment is not yet supported
+--echo # 28 locks (table + 13 partition lock/unlock)
+--echo # 1 commit
+
+
+--echo #
+--echo # UPDATE with auto increment, lock pruning
+--echo #
+FLUSH STATUS;
+UPDATE t2 SET b = CONCAT(b, ", UPDATED") WHERE a = 10;
+eval $get_handler_status_counts;
+--echo # 4 locks (table + 1 partition lock/unlock)
+--echo # 1 read_key + 1 update + 1 commit
+
 --echo #
 --echo # Test of pruning with secondary column auto_inc INSERT
 --echo #
@@ -1369,7 +1386,7 @@ SELECT * FROM t2 ORDER BY a;
 --echo # Test triggers
 --echo # Tables used in triggers cannot be pruned for locks.
 --echo # Tables with triggers cannot be pruned for locks if
---echo # BEFORE INSERT/UPDATE trigger exists.
+--echo # BEFORE INSERT/UPDATE trigger uses any partitioning columns.
 --echo #
 CREATE TABLE t3
 (old_a int,
@@ -1501,38 +1518,224 @@ eval $get_handler_status_counts;
 --echo # 28 locks (3 tables, 1 + 5 + 5 partitions)
 --echo # t1, t3 before delete trigger, t3 after delete trigger
 
-INSERT INTO t2 VALUES (1, "First row, p1");
-INSERT INTO t2 VALUES (3, "First row, p3");
-CREATE TRIGGER t2_before_insert BEFORE INSERT
-ON t2 FOR EACH ROW
-SET NEW.a = NEW.a + 1;
+SELECT * FROM t1 ORDER BY a;
+SELECT * FROM t2 ORDER BY a;
+--sorted_result
+SELECT * FROM t3 ORDER BY new_a;
+TRUNCATE TABLE t1;
 
-CREATE TRIGGER t2_before_update BEFORE UPDATE
-ON t2 FOR EACH ROW
-SET NEW.a = NEW.a - 1;
+DROP TRIGGER t1_before_insert;
+DROP TRIGGER t1_before_update;
+DROP TRIGGER t1_before_delete;
+DROP TRIGGER t1_after_insert;
+DROP TRIGGER t1_after_update;
+DROP TRIGGER t1_after_delete;
+
+--echo #
+--echo # Test BEFORE INSERT TRIGGER depending on partitioning column
+--echo #
+CREATE TRIGGER t1_before_insert BEFORE INSERT
+ON t1 FOR EACH ROW
+SET NEW.b = CONCAT("b: ", NEW.b, " a: ", NEW.a);
 
 FLUSH STATUS;
-INSERT INTO t2 VALUES (1, "Second row, p1");
+INSERT INTO t1 VALUES (0, "first row, p0");
 eval $get_handler_status_counts;
 --echo # 28 locks (1 tables, 13 partitions lock/unlock)
-SELECT * FROM t2 ORDER BY a;
 
 FLUSH STATUS;
---error ER_DUP_ENTRY
-UPDATE t2 SET b = CONCAT(b, ", UPDATED3") WHERE a = 4;
+INSERT INTO t1 VALUES (0, "Second row, p0")
+ON DUPLICATE KEY UPDATE b = CONCAT(b, ", duplicate key");
+eval $get_handler_status_counts;
+--echo # 28 locks (1 tables, 13 partitions lock/unlock)
+
+FLUSH STATUS;
+UPDATE t1 SET b = CONCAT(b, ", Updated") WHERE a = 0;
+eval $get_handler_status_counts;
+--echo # 4 locks (1 tables, 1 partitions lock/unlock)
+
+FLUSH STATUS;
+UPDATE t1 SET a = 1, b = CONCAT(b, ", a was 0") WHERE a = 0;
+eval $get_handler_status_counts;
+--echo # Updating partitioning column, no lock pruning
+--echo # 28 locks (1 tables, 13 partitions lock/unlock)
+
+--echo #
+--echo # Test BEFORE INSERT TRIGGER not depending on partitioning column
+--echo #
+DROP TRIGGER t1_before_insert;
+
+CREATE TRIGGER t1_before_insert BEFORE INSERT
+ON t1 FOR EACH ROW
+SET NEW.b = CONCAT("b: ", NEW.b);
+
+FLUSH STATUS;
+INSERT INTO t1 VALUES (0, "first row, p0");
+eval $get_handler_status_counts;
+--echo # 4 locks (1 tables, 1 partitions lock/unlock)
+
+FLUSH STATUS;
+INSERT INTO t1 VALUES (0, "Second row, p0")
+ON DUPLICATE KEY UPDATE b = CONCAT(b, ", duplicate key");
 eval $get_handler_status_counts;
+--echo # 4 locks (1 tables, 1 partitions lock/unlock)
+
+FLUSH STATUS;
+UPDATE t1 SET b = CONCAT(b, ", Updated") WHERE a = 0;
+eval $get_handler_status_counts;
+--echo # 4 locks (1 tables, 1 partitions lock/unlock)
+
+FLUSH STATUS;
+UPDATE t1 SET a = 2, b = CONCAT(b, ", a was 0") WHERE a = 0;
+eval $get_handler_status_counts;
+--echo # Updating partitioning column, no lock pruning
 --echo # 28 locks (1 tables, 13 partitions lock/unlock)
 
+--echo #
+--echo # Test BEFORE UPDATE TRIGGER OLD depending on partitioning column.
+--echo # Note that it does not update any partitioning column.
+--echo #
+CREATE TRIGGER t1_before_update BEFORE UPDATE
+ON t1 FOR EACH ROW
+SET NEW.b = CONCAT("old a: ", OLD.a, " new b: ", NEW.b);
+
 FLUSH STATUS;
-UPDATE t2 SET b = CONCAT(b, ", UPDATED3") WHERE a = 1;
+INSERT INTO t1 VALUES (0, "1st p0");
+eval $get_handler_status_counts;
+--echo # 4 locks (1 tables, 1 partitions lock/unlock)
+
+FLUSH STATUS;
+INSERT INTO t1 VALUES (0, "2nd p0")
+ON DUPLICATE KEY UPDATE b = CONCAT(b, ", dup key");
 eval $get_handler_status_counts;
 --echo # 28 locks (1 tables, 13 partitions lock/unlock)
 
+FLUSH STATUS;
+UPDATE t1 SET b = CONCAT(b, ", Updated") WHERE a = 0;
+eval $get_handler_status_counts;
+--echo # Lock pruning possible!
+--echo # 4 locks (1 tables, 1 partitions lock/unlock)
+
+FLUSH STATUS;
+UPDATE t1 SET a = 3, b = CONCAT(b, ", a was 0") WHERE a = 0;
+eval $get_handler_status_counts;
+--echo # Updating partitioning column, no lock pruning
+--echo # 28 locks (1 tables, 13 partitions lock/unlock)
+
+--echo #
+--echo # Test BEFORE UPDATE TRIGGER NEW depending on partitioning column.
+--echo # Note that it does not update any partitioning column.
+--echo #
+DROP TRIGGER t1_before_update;
+
+CREATE TRIGGER t1_before_update BEFORE UPDATE
+ON t1 FOR EACH ROW
+SET NEW.b = CONCAT("new a: ", NEW.a, " new b: ", NEW.b);
+
+FLUSH STATUS;
+INSERT INTO t1 VALUES (0, "1st p0");
+eval $get_handler_status_counts;
+--echo # 4 locks (1 tables, 1 partitions lock/unlock)
+
+FLUSH STATUS;
+INSERT INTO t1 VALUES (0, "2nd p0")
+ON DUPLICATE KEY UPDATE b = CONCAT(b, ", dup key");
+eval $get_handler_status_counts;
+--echo # 28 locks (1 tables, 13 partitions lock/unlock)
+
+FLUSH STATUS;
+UPDATE t1 SET b = CONCAT(b, ", Updated") WHERE a = 0;
+eval $get_handler_status_counts;
+--echo # 28 locks (1 tables, 13 partitions lock/unlock)
+
+FLUSH STATUS;
+UPDATE t1 SET a = 4, b = CONCAT(b, ", a was 0") WHERE a = 0;
+eval $get_handler_status_counts;
+--echo # Updating partitioning column, no lock pruning
+--echo # 28 locks (1 tables, 13 partitions lock/unlock)
+
+
+--echo #
+--echo # Test BEFORE UPDATE TRIGGER not depending on partitioning column
+--echo #
+DROP TRIGGER t1_before_update;
+
+CREATE TRIGGER t1_before_update BEFORE UPDATE
+ON t1 FOR EACH ROW
+SET NEW.b = CONCAT("new b: ", NEW.b);
+
+FLUSH STATUS;
+INSERT INTO t1 VALUES (0, "1st p0");
+eval $get_handler_status_counts;
+--echo # 4 locks (1 tables, 1 partitions lock/unlock)
+
+FLUSH STATUS;
+INSERT INTO t1 VALUES (0, "2nd p0")
+ON DUPLICATE KEY UPDATE b = CONCAT(b, ", dup key");
+eval $get_handler_status_counts;
+--echo # 4 locks (1 tables, 1 partitions lock/unlock)
+
+FLUSH STATUS;
+UPDATE t1 SET b = CONCAT(b, ", Updated") WHERE a = 0;
+eval $get_handler_status_counts;
+--echo # 4 locks (1 tables, 1 partitions lock/unlock)
+
+FLUSH STATUS;
+UPDATE t1 SET a = 5, b = CONCAT(b, ", a was 0") WHERE a = 0;
+eval $get_handler_status_counts;
+--echo # Updating partitioning column, no lock pruning
+--echo # 28 locks (1 tables, 13 partitions lock/unlock)
+
+SELECT * FROM t1 ORDER BY a;
+
+--echo #
+--echo # Test of BEFORE UPDATE triggers and multi UPDATE
+--echo #
+DROP TRIGGER t1_before_insert;
+DROP TRIGGER t1_before_update;
+TRUNCATE TABLE t1;
+TRUNCATE TABLE t2;
+INSERT INTO t1 VALUES (1, "MultiUpdate1");
+INSERT INTO t1 VALUES (2, "MultiUpdate2");
+INSERT INTO t2 VALUES (1, "MultiUpdate1");
+INSERT INTO t2 VALUES (2, "MultiUpdate2");
+
+CREATE TRIGGER t1_before_update BEFORE UPDATE
+ON t1 FOR EACH ROW
+SET NEW.b = CONCAT("new1 b: ", NEW.b);
+
+CREATE TRIGGER t2_before_update BEFORE UPDATE
+ON t2 FOR EACH ROW
+SET NEW.b = CONCAT("new2 a: ", NEW.a, " new2 b: ", NEW.b);
+
+FLUSH STATUS;
+UPDATE t1, t2
+SET t1.b = CONCAT(t1.b, ",(1) t2.b:", t2.b),
+    t2.b = CONCAT(t2.b, ",(1) t1.b:", t1.b)
+WHERE t2.b = t1.b and t1.a = 1;
+eval $get_handler_status_counts;
+--echo # 32 locks (2 table, 13 + 1 partitions lock/unlock)
+--echo # 15 read_key
+--echo # 1 read_next, read_rnd
+--echo # 2 read_rnd_next
+--echo # 2 update
+
+FLUSH STATUS;
+UPDATE t1, t2
+SET t1.b = CONCAT(t1.b, ",(2) t2.b:", t2.b),
+    t2.b = CONCAT(t2.b, ",(2) t1.b:", t1.b)
+WHERE t1.b = t2.b and t2.a = 2;
+eval $get_handler_status_counts;
+--echo # 32 locks (2 table, 13 + 1 partitions lock/unlock)
+--echo # 15 read_key
+--echo # 1 read_next, read_rnd
+--echo # 2 read_rnd_next
+--echo # 2 update
 
 SELECT * FROM t1 ORDER BY a;
+--echo # TODO: FIXME: Why is the first row in t2 also affected by t1 trigger?
 SELECT * FROM t2 ORDER BY a;
---sorted_result
-SELECT * FROM t3 ORDER BY new_a;
+
 DROP TABLE t1, t2, t3;
 
 --echo #

=== modified file 'sql/partition_info.cc'
--- a/sql/partition_info.cc	revid:mattias.jonsson@stripped
+++ b/sql/partition_info.cc	revid:mattias.jonsson@stripped
@@ -279,11 +279,15 @@ bool partition_info::can_prune_insert(TH
     If under LOCK TABLES pruning will skip start_stmt instead of external_lock
     for unused partitions.
 
-    Cannot prune if there are BEFORE INSERT triggers,
-    since they may change the row to be in another partition.
+    Cannot prune if there are BEFORE INSERT triggers that change any
+    partitioning column, since such a trigger may move the row to another
+    partition.
   */
   if (table->triggers &&
-      table->triggers->has_triggers(TRG_EVENT_INSERT, TRG_ACTION_BEFORE))
+      table->triggers->has_triggers(TRG_EVENT_INSERT, TRG_ACTION_BEFORE) &&
+      table->triggers->is_fields_used_in_trigger(&full_part_field_set,
+                                                 TRG_EVENT_INSERT,
+                                                 TRG_ACTION_BEFORE))
     DBUG_RETURN(false);
 
   if (table->found_next_number_field)
@@ -323,13 +327,19 @@ bool partition_info::can_prune_insert(TH
       DBUG_RETURN(false);
 
     /*
-      Cannot prune if there are BEFORE UPDATE triggers,
-      since they may change the row to be in another partition.
+      Cannot prune if there are BEFORE UPDATE triggers that change any
+      partitioning column, since such a trigger may move the row to another
+      partition.
     */
     if (table->triggers &&
         table->triggers->has_triggers(TRG_EVENT_UPDATE,
+                                      TRG_ACTION_BEFORE) &&
+        table->triggers->is_fields_used_in_trigger(&full_part_field_set,
+                                      TRG_EVENT_UPDATE,
                                       TRG_ACTION_BEFORE))
+    {
       DBUG_RETURN(false);
+    }
   }
 
   /*

=== modified file 'sql/sql_trigger.cc'
--- a/sql/sql_trigger.cc	revid:mattias.jonsson@stripped
+++ b/sql/sql_trigger.cc	revid:mattias.jonsson@stripped
@@ -2213,6 +2213,35 @@ add_tables_and_routines_for_triggers(THD
 
 
 /**
+  Check if any of the marked fields are used in the trigger.
+
+  @param used_fields  Bitmap over the table's fields to check against
+  @param event_type   Type of trigger event to inspect
+  @param action_time  Trigger action time to inspect
+*/
+
+bool Table_triggers_list::is_fields_used_in_trigger(MY_BITMAP *used_fields,
+                                                    trg_event_type event_type,
+                                                    trg_action_time_type action_time)
+{
+  Item_trigger_field *trg_field;
+  DBUG_ASSERT(used_fields->n_bits == trigger_table->s->fields);
+
+  for (trg_field= trigger_fields[event_type][action_time]; trg_field;
+       trg_field= trg_field->next_trg_field)
+  {
+    /* We cannot check fields which are not present in the table. */
+    if (trg_field->field_idx != (uint)-1)
+    {
+      if (bitmap_is_set(used_fields, trg_field->field_idx))
+        return true;
+    }
+  }
+  return false;
+}
+
+
+/**
   Mark fields of subject table which we read/set in its triggers
   as such.
 
@@ -2221,7 +2250,6 @@ add_tables_and_routines_for_triggers(THD
   and thus informs handler that values for these fields should be
   retrieved/stored during execution of statement.
 
-  @param thd    Current thread context
   @param event  Type of event triggers for which we are going to inspect
 */
 

=== modified file 'sql/sql_trigger.h'
--- a/sql/sql_trigger.h	revid:mattias.jonsson@stripped
+++ b/sql/sql_trigger.h	revid:mattias.jonsson@stripped
@@ -206,7 +206,9 @@ public:
   bool add_tables_and_routines_for_triggers(THD *thd,
                                             Query_tables_list *prelocking_ctx,
                                             TABLE_LIST *table_list);
-
+  bool is_fields_used_in_trigger(MY_BITMAP *used_fields,
+                                 trg_event_type event_type,
+                                 trg_action_time_type action_time);
 private:
   bool prepare_record1_accessors();
   LEX_STRING* change_table_name_in_trignames(const char *old_db_name,

=== modified file 'sql/sql_update.cc'
--- a/sql/sql_update.cc	revid:mattias.jonsson@stripped
+++ b/sql/sql_update.cc	revid:mattias.jonsson@stripped
@@ -408,45 +408,15 @@ int mysql_update(THD *thd,
   // Don't count on usage of 'only index' when calculating which key to use
   table->covering_keys.clear_all();
 
+  /*
+    This must be done before partition pruning, since prune_partitions()
+    uses table->write_set to determine whether locks may be pruned too.
+  */
+  table->mark_columns_needed_for_update();
+
 #ifdef WITH_PARTITION_STORAGE_ENGINE
   if (table->part_info)
   {
-    bool prune_locks= true;
-    MY_BITMAP lock_partitions;
-    if (table->triggers &&
-        table->triggers->has_triggers(TRG_EVENT_UPDATE, TRG_ACTION_BEFORE))
-    {
-      /*
-        BEFORE UPDATE triggers may change the records partitioning
-        column, forcing it to another partition.
-        So it is not possible to prune external_lock/start_stmt for
-        partitions (lock_partitions bitmap), only for scanning
-        (read_partitions bitmap).
-        Copy the current lock_partitions bitmap and restore it after
-        prune_partitions call, to lock all non explicitly selected
-        partitions.
-      */
-      uint32 *bitmap_buf;
-      uint bitmap_bytes;
-      uint num_partitions;
-      DBUG_ASSERT(table->part_info->bitmaps_are_initialized);
-      prune_locks= false;
-      num_partitions= table->part_info->lock_partitions.n_bits;
-      bitmap_bytes= bitmap_buffer_size(num_partitions);
-      if (!(bitmap_buf= (uint32*) thd->alloc(bitmap_bytes)))
-      {
-        mem_alloc_error(bitmap_bytes);
-        DBUG_RETURN(1);
-      }
-      /* Also clears all bits. */
-      if (bitmap_init(&lock_partitions, bitmap_buf, num_partitions, FALSE))
-      {
-        mem_alloc_error(bitmap_bytes);   /* Cannot happen, due to pre-alloc */
-        DBUG_RETURN(1);
-      }
-      bitmap_copy(&lock_partitions, &table->part_info->lock_partitions);
-    }
-
     if (prune_partitions(thd, table, conds))
       DBUG_RETURN(1);
     if (table->all_partitions_pruned_away)
@@ -461,8 +431,6 @@ int mysql_update(THD *thd,
       my_ok(thd);                            // No matching records
       DBUG_RETURN(0);
     }
-    if (!prune_locks)
-      bitmap_copy(&table->part_info->lock_partitions, &lock_partitions);
   }
 #endif
   if (lock_tables(thd, table_list, thd->lex->table_count, 0))
@@ -471,7 +439,6 @@ int mysql_update(THD *thd,
   /* Update the table->file->stats.records number */
   table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
 
-  table->mark_columns_needed_for_update();
   select= make_select(table, 0, 0, conds, 0, &error);
 
   { // Enter scope for optimizer trace wrapper

No bundle (reason: useless for push emails).
Thread
bzr push into mysql-trunk branch (mattias.jonsson:3830 to 3831) WL#4443Mattias Jonsson16 May