Merge lp:~sergei.glushchenko/percona-server/5.5-ps-bug1192354 into lp:percona-server/5.5

Proposed by Sergei Glushchenko
Status: Merged
Approved by: Alexey Kopytov
Approved revision: no longer in the source branch.
Merged at revision: 543
Proposed branch: lp:~sergei.glushchenko/percona-server/5.5-ps-bug1192354
Merge into: lp:percona-server/5.5
Diff against target: 181 lines (+103/-4)
3 files modified
Percona-Server/mysql-test/r/percona_bug1192354.result (+39/-0)
Percona-Server/mysql-test/t/percona_bug1192354.test (+38/-0)
Percona-Server/sql/sql_show.cc (+26/-4)
To merge this branch: bzr merge lp:~sergei.glushchenko/percona-server/5.5-ps-bug1192354
Reviewer Review Type Date Requested Status
Alexey Kopytov (community) Approve
Review via email: mp+171082@code.launchpad.net

Description of the change

Bug 1192354: accessing information_schema.partitions causes plans to change
ha_innodb::info stores statistics for keys in the table structure, which is
shared for all the partitions of the table.
When ha_partition::info is invoked, it calls ha_innodb::info for every
partition, determines which has the maximum number of rows and invokes
ha_innodb::info again for this partition. It sets partitioned table
statistics to the statistics of the largest part.
When i_s.partitions is invoked, it calls ha_innodb::info for every partition
as well, but it doesn't perform the last call to set statistics for the
whole table back. So, the table statistics are set to the statistics of the
last partition queried, which is not the best choice in most cases.
This patch implements behavior for i_s.partitions similar to behavior of
ha_partition::info.

To post a comment you must log in.
Revision history for this message
Sergei Glushchenko (sergei.glushchenko) wrote :
Revision history for this message
Alexey Kopytov (akopytov) wrote :

Sergei,

Looks good in general, just minor comments:

  - the fix is not specific to InnoDB. If you want to have InnoDB-specific
    coverage in the test case, it should have --source include/have_innodb.inc
  - the test case should include have_partition.inc as well
  - the fix should also reset table index stats on errors in get_schema_partitions_record()

review: Needs Fixing
Revision history for this message
Sergei Glushchenko (sergei.glushchenko) wrote :

Hi Alexey,

I have added includes into test case.
As for errors in get_schema_partitions_record, they seem to be rare. So rare, in fact, that debug and non-debug builds handle them differently. However, I addressed the issue in the least intrusive way: I inserted an invocation of file->info in case of error. This will cause file->info to be called for all partitions, even if they were queried before an error occurred.

http://jenkins.percona.com/view/PS%205.5/job/percona-server-5.5-param/776/

Revision history for this message
Alexey Kopytov (akopytov) :
review: Approve

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1=== added file 'Percona-Server/mysql-test/r/percona_bug1192354.result'
2--- Percona-Server/mysql-test/r/percona_bug1192354.result 1970-01-01 00:00:00 +0000
3+++ Percona-Server/mysql-test/r/percona_bug1192354.result 2013-06-25 15:57:30 +0000
4@@ -0,0 +1,39 @@
5+DROP TABLE IF EXISTS t1;
6+Warnings:
7+Note 1051 Unknown table 't1'
8+CREATE TABLE t1 (
9+c1 int,
10+c2 int,
11+PRIMARY KEY (c1, c2)
12+) ENGINE = InnoDB
13+PARTITION BY LIST (c1)
14+(PARTITION p0 VALUES IN (0) ENGINE = InnoDB,
15+PARTITION p1 VALUES IN (1) ENGINE = InnoDB);
16+INSERT INTO t1 VALUES (0, 0);
17+INSERT INTO t1 SELECT 0, c2+1 FROM t1;
18+INSERT INTO t1 SELECT 0, c2+2 FROM t1;
19+INSERT INTO t1 SELECT 0, c2+4 FROM t1;
20+INSERT INTO t1 SELECT 0, c2+8 FROM t1;
21+ANALYZE TABLE t1;
22+Table Op Msg_type Msg_text
23+test.t1 analyze status OK
24+SELECT cardinality FROM information_schema.statistics WHERE table_name = 't1';
25+cardinality
26+2
27+17
28+SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1';
29+partition_name table_rows
30+p0 16
31+p1 0
32+SELECT cardinality FROM information_schema.statistics WHERE table_name = 't1';
33+cardinality
34+2
35+17
36+ANALYZE TABLE t1;
37+Table Op Msg_type Msg_text
38+test.t1 analyze status OK
39+SELECT cardinality FROM information_schema.statistics WHERE table_name = 't1';
40+cardinality
41+2
42+17
43+DROP TABLE t1;
44
45=== added file 'Percona-Server/mysql-test/t/percona_bug1192354.test'
46--- Percona-Server/mysql-test/t/percona_bug1192354.test 1970-01-01 00:00:00 +0000
47+++ Percona-Server/mysql-test/t/percona_bug1192354.test 2013-06-25 15:57:30 +0000
48@@ -0,0 +1,38 @@
49+##############################################################################
50+# Bug 1192354: accessing information_schema.partitions causes plans to change
51+# MySQL bug: http://bugs.mysql.com/bug.php?id=69179
52+#
56+# This test creates a partitioned table and queries its statistics
54+# before and after querying information_schema.partitions
55+##############################################################################
56+
57+--source include/have_innodb.inc
58+--source include/have_partition.inc
59+
60+DROP TABLE IF EXISTS t1;
61+
62+CREATE TABLE t1 (
63+ c1 int,
64+ c2 int,
65+ PRIMARY KEY (c1, c2)
66+) ENGINE = InnoDB
67+PARTITION BY LIST (c1)
68+(PARTITION p0 VALUES IN (0) ENGINE = InnoDB,
69+ PARTITION p1 VALUES IN (1) ENGINE = InnoDB);
70+
71+INSERT INTO t1 VALUES (0, 0);
72+INSERT INTO t1 SELECT 0, c2+1 FROM t1;
73+INSERT INTO t1 SELECT 0, c2+2 FROM t1;
74+INSERT INTO t1 SELECT 0, c2+4 FROM t1;
75+INSERT INTO t1 SELECT 0, c2+8 FROM t1;
76+
77+ANALYZE TABLE t1;
78+SELECT cardinality FROM information_schema.statistics WHERE table_name = 't1';
79+
80+SELECT partition_name, table_rows FROM information_schema.partitions WHERE table_name = 't1';
81+SELECT cardinality FROM information_schema.statistics WHERE table_name = 't1';
82+
83+ANALYZE TABLE t1;
84+SELECT cardinality FROM information_schema.statistics WHERE table_name = 't1';
85+
86+DROP TABLE t1;
87
88=== modified file 'Percona-Server/sql/sql_show.cc'
89--- Percona-Server/sql/sql_show.cc 2013-06-01 15:04:43 +0000
90+++ Percona-Server/sql/sql_show.cc 2013-06-25 15:57:30 +0000
91@@ -6145,13 +6145,15 @@
92 static void store_schema_partitions_record(THD *thd, TABLE *schema_table,
93 TABLE *showing_table,
94 partition_element *part_elem,
95- handler *file, uint part_id)
96+ handler *file, uint part_id,
97+ ha_rows *records)
98 {
99 TABLE* table= schema_table;
100 CHARSET_INFO *cs= system_charset_info;
101 PARTITION_STATS stat_info;
102 MYSQL_TIME time;
103 file->get_dynamic_partition_info(&stat_info, part_id);
104+ *records= stat_info.records;
105 table->field[0]->store(STRING_WITH_LEN("def"), cs);
106 table->field[12]->store((longlong) stat_info.records, TRUE);
107 table->field[13]->store((longlong) stat_info.mean_rec_length, TRUE);
108@@ -6274,8 +6276,12 @@
109 String tmp_str;
110 TABLE *show_table= tables->table;
111 handler *file;
112+ ha_rows records;
113 #ifdef WITH_PARTITION_STORAGE_ENGINE
114 partition_info *part_info;
115+ uint handler_part_id= 0;
116+ ha_rows max_records= 0;
117+ PARTITION_STATS stat_info;
118 #endif
119 DBUG_ENTER("get_schema_partitions_record");
120
121@@ -6403,6 +6409,8 @@
122 list_value,
123 tmp_str))
124 {
125+ file->info(HA_STATUS_CONST | HA_STATUS_TIME | HA_STATUS_VARIABLE |
126+ HA_STATUS_VARIABLE_EXTRA | HA_STATUS_NO_LOCK);
127 DBUG_RETURN(1);
128 }
129 table->field[11]->store(tmp_str.ptr(), tmp_str.length(), cs);
130@@ -6441,6 +6449,8 @@
131 list_value,
132 tmp_str))
133 {
134+ file->info(HA_STATUS_CONST | HA_STATUS_TIME | HA_STATUS_VARIABLE |
135+ HA_STATUS_VARIABLE_EXTRA | HA_STATUS_NO_LOCK);
136 DBUG_RETURN(1);
137 }
138 if (part_info->part_field_list.elements > 1U)
139@@ -6477,27 +6487,39 @@
140 table->field[6]->set_notnull();
141
142 store_schema_partitions_record(thd, table, show_table, subpart_elem,
143- file, part_id);
144+ file, part_id, &records);
145+ handler_part_id= (records > max_records) ? part_id : handler_part_id;
146 part_id++;
147 if(schema_table_store_record(thd, table))
148+ {
149+ file->info(HA_STATUS_CONST | HA_STATUS_TIME | HA_STATUS_VARIABLE |
150+ HA_STATUS_VARIABLE_EXTRA | HA_STATUS_NO_LOCK);
151 DBUG_RETURN(1);
152+ }
153 }
154 }
155 else
156 {
157 store_schema_partitions_record(thd, table, show_table, part_elem,
158- file, part_id);
159+ file, part_id, &records);
160+ handler_part_id= (records > max_records) ? part_id : handler_part_id;
161 part_id++;
162 if(schema_table_store_record(thd, table))
163+ {
164+ file->info(HA_STATUS_CONST | HA_STATUS_TIME | HA_STATUS_VARIABLE |
165+ HA_STATUS_VARIABLE_EXTRA | HA_STATUS_NO_LOCK);
166 DBUG_RETURN(1);
167+ }
168 }
169 }
170+ file->get_dynamic_partition_info(&stat_info, handler_part_id);
171 DBUG_RETURN(0);
172 }
173 else
174 #endif
175 {
176- store_schema_partitions_record(thd, table, show_table, 0, file, 0);
177+ store_schema_partitions_record(thd, table, show_table, 0, file, 0,
178+ &records);
179 if(schema_table_store_record(thd, table))
180 DBUG_RETURN(1);
181 }

Subscribers

People subscribed via source and target branches