Merge lp:~laurynas-biveinis/percona-server/bug54330 into lp:percona-server/5.5

Proposed by Laurynas Biveinis
Status: Merged
Approved by: Alexey Kopytov
Approved revision: no longer in the source branch.
Merged at revision: 224
Proposed branch: lp:~laurynas-biveinis/percona-server/bug54330
Merge into: lp:percona-server/5.5
Diff against target: 249 lines (+237/-0)
2 files modified
patches/bug54330.patch (+236/-0)
patches/series (+1/-0)
To merge this branch: bzr merge lp:~laurynas-biveinis/percona-server/bug54330
Reviewer: Alexey Kopytov (community)
Review status: Approve
Review via email: mp+94510@code.launchpad.net

Description of the change

Fix bug 939485 (MySQL bugs #64432 and #54330): fast index creation is
broken; the created index contains only part of the data.

This has already been fixed upstream in 5.1 for bug #54330. The fix is a
straightforward backport of revisions 3351.14.149 and 3351.54.1 from
there, with a testcase added on top.

Preview Diff

1=== added file 'patches/bug54330.patch'
2--- patches/bug54330.patch 1970-01-01 00:00:00 +0000
3+++ patches/bug54330.patch 2012-02-24 09:44:20 +0000
4@@ -0,0 +1,236 @@
5+--- a/storage/innobase/row/row0merge.c
6++++ b/storage/innobase/row/row0merge.c
7+@@ -1607,22 +1607,28 @@
8+ const dict_index_t* index, /*!< in: index being created */
9+ merge_file_t* file, /*!< in/out: file containing
10+ index entries */
11+- ulint* half, /*!< in/out: half the file */
12+ row_merge_block_t* block, /*!< in/out: 3 buffers */
13+ int* tmpfd, /*!< in/out: temporary file handle */
14+- struct TABLE* table) /*!< in/out: MySQL table, for
15++ struct TABLE* table, /*!< in/out: MySQL table, for
16+ reporting erroneous key value
17+ if applicable */
18++ ulint* num_run,/*!< in/out: Number of runs remain
19++ to be merged */
20++ ulint* run_offset) /*!< in/out: Array contains the
21++ first offset number for each merge
22++ run */
23+ {
24+ ulint foffs0; /*!< first input offset */
25+ ulint foffs1; /*!< second input offset */
26+ ulint error; /*!< error code */
27+ merge_file_t of; /*!< output file */
28+- const ulint ihalf = *half;
29++ const ulint ihalf = run_offset[*num_run / 2];
30+ /*!< half the input file */
31+- ulint ohalf; /*!< half the output file */
32++ ulint n_run = 0;
33++ /*!< num of runs generated from this merge */
34+
35+ UNIV_MEM_ASSERT_W(block[0], 3 * sizeof block[0]);
36++
37+ ut_ad(ihalf < file->offset);
38+
39+ of.fd = *tmpfd;
40+@@ -1638,17 +1644,20 @@
41+ #endif /* POSIX_FADV_SEQUENTIAL */
42+
43+ /* Merge blocks to the output file. */
44+- ohalf = 0;
45+ foffs0 = 0;
46+ foffs1 = ihalf;
47+
48++ UNIV_MEM_INVALID(run_offset, *num_run * sizeof *run_offset);
49++
50+ for (; foffs0 < ihalf && foffs1 < file->offset; foffs0++, foffs1++) {
51+- ulint ahalf; /*!< arithmetic half the input file */
52+
53+ if (UNIV_UNLIKELY(trx_is_interrupted(trx))) {
54+ return(DB_INTERRUPTED);
55+ }
56+
57++ /* Remember the offset number for this run */
58++ run_offset[n_run++] = of.offset;
59++
60+ error = row_merge_blocks(index, file, block,
61+ &foffs0, &foffs1, &of, table);
62+
63+@@ -1656,21 +1665,6 @@
64+ return(error);
65+ }
66+
67+- /* Record the offset of the output file when
68+- approximately half the output has been generated. In
69+- this way, the next invocation of row_merge() will
70+- spend most of the time in this loop. The initial
71+- estimate is ohalf==0. */
72+- ahalf = file->offset / 2;
73+- ut_ad(ohalf <= of.offset);
74+-
75+- /* Improve the estimate until reaching half the input
76+- file size, or we can not get any closer to it. All
77+- comparands should be non-negative when !(ohalf < ahalf)
78+- because ohalf <= of.offset. */
79+- if (ohalf < ahalf || of.offset - ahalf < ohalf - ahalf) {
80+- ohalf = of.offset;
81+- }
82+ }
83+
84+ /* Copy the last blocks, if there are any. */
85+@@ -1680,6 +1674,9 @@
86+ return(DB_INTERRUPTED);
87+ }
88+
89++ /* Remember the offset number for this run */
90++ run_offset[n_run++] = of.offset;
91++
92+ if (!row_merge_blocks_copy(index, file, block, &foffs0, &of)) {
93+ return(DB_CORRUPTION);
94+ }
95+@@ -1692,6 +1689,9 @@
96+ return(DB_INTERRUPTED);
97+ }
98+
99++ /* Remember the offset number for this run */
100++ run_offset[n_run++] = of.offset;
101++
102+ if (!row_merge_blocks_copy(index, file, block, &foffs1, &of)) {
103+ return(DB_CORRUPTION);
104+ }
105+@@ -1703,10 +1703,23 @@
106+ return(DB_CORRUPTION);
107+ }
108+
109++ ut_ad(n_run <= *num_run);
110++
111++ *num_run = n_run;
112++
113++ /* Each run can contain one or more offsets. As merge goes on,
114++ the number of runs (to merge) will reduce until we have one
115++ single run. So the number of runs will always be smaller than
116++ the number of offsets in file */
117++ ut_ad((*num_run) <= file->offset);
118++
119++ /* The number of offsets in output file is always equal or
120++ smaller than input file */
121++ ut_ad(of.offset <= file->offset);
122++
123+ /* Swap file descriptors for the next pass. */
124+ *tmpfd = file->fd;
125+ *file = of;
126+- *half = ohalf;
127+
128+ UNIV_MEM_INVALID(block[0], 3 * sizeof block[0]);
129+
130+@@ -1731,27 +1744,44 @@
131+ if applicable */
132+ {
133+ ulint half = file->offset / 2;
134++ ulint num_runs;
135++ ulint* run_offset;
136++ ulint error = DB_SUCCESS;
137++
138++ /* Record the number of merge runs we need to perform */
139++ num_runs = file->offset;
140++
141++ /* If num_runs are less than 1, nothing to merge */
142++ if (num_runs <= 1) {
143++ return(error);
144++ }
145++
146++ /* "run_offset" records each run's first offset number */
147++ run_offset = (ulint*) mem_alloc(file->offset * sizeof(ulint));
148++
149++ /* This tells row_merge() where to start for the first round
150++ of merge. */
151++ run_offset[half] = half;
152+
153+ /* The file should always contain at least one byte (the end
154+ of file marker). Thus, it must be at least one block. */
155+ ut_ad(file->offset > 0);
156+
157++ /* Merge the runs until we have one big run */
158+ do {
159+- ulint error;
160++ error = row_merge(trx, index, file, block, tmpfd,
161++ table, &num_runs, run_offset);
162+
163+- error = row_merge(trx, index, file, &half,
164+- block, tmpfd, table);
165++ UNIV_MEM_ASSERT_RW(run_offset, num_runs * sizeof *run_offset);
166+
167+ if (error != DB_SUCCESS) {
168+- return(error);
169++ break;
170+ }
171++ } while (num_runs > 1);
172+
173+- /* half > 0 should hold except when the file consists
174+- of one block. No need to merge further then. */
175+- ut_ad(half > 0 || file->offset == 1);
176+- } while (half < file->offset && half > 0);
177++ mem_free(run_offset);
178+
179+- return(DB_SUCCESS);
180++ return(error);
181+ }
182+
183+ /*************************************************************//**
184+--- /dev/null
185++++ b/mysql-test/suite/innodb/r/bug54330.result
186+@@ -0,0 +1,13 @@
187++DROP TABLE IF EXISTS t1;
188++CREATE TABLE t1 (
189++id BIGINT(20) AUTO_INCREMENT PRIMARY KEY,
190++bar BIGINT(20)
191++) ENGINE=InnoDB;
192++SELECT COUNT(*) FROM t1;
193++COUNT(*)
194++517672
195++ALTER TABLE t1 ADD INDEX baz (bar);
196++SELECT COUNT(*) FROM t1 FORCE INDEX (baz);
197++COUNT(*)
198++517672
199++DROP TABLE t1;
200+--- /dev/null
201++++ b/mysql-test/suite/innodb/t/bug54330.test
202+@@ -0,0 +1,38 @@
203++# Testcase for MySQL bug #54330 - broken fast index creation
204++
205++--disable_warnings
206++DROP TABLE IF EXISTS t1;
207++--enable_warnings
208++
209++CREATE TABLE t1 (
210++ id BIGINT(20) AUTO_INCREMENT PRIMARY KEY,
211++ bar BIGINT(20)
212++) ENGINE=InnoDB;
213++
214++--disable_query_log
215++SET @old_autocommit=@@AUTOCOMMIT;
216++SET AUTOCOMMIT=0;
217++let $1= 515641;
218++while ($1)
219++{
220++ eval INSERT INTO t1 (bar) VALUES (NULL);
221++ dec $1;
222++}
223++let $1= 2031;
224++while ($1)
225++{
226++ eval INSERT INTO t1 (bar) VALUES ($1);
227++ dec $1;
228++}
229++COMMIT;
230++SET AUTOCOMMIT=@old_autocommit;
231++--enable_query_log
232++
233++SELECT COUNT(*) FROM t1;
234++
235++ALTER TABLE t1 ADD INDEX baz (bar);
236++
237++# With the bug present this will differ from the SELECT above!
238++SELECT COUNT(*) FROM t1 FORCE INDEX (baz);
239++
240++DROP TABLE t1;
241
242=== modified file 'patches/series'
243--- patches/series 2012-01-19 08:42:23 +0000
244+++ patches/series 2012-02-24 09:44:20 +0000
245@@ -63,3 +63,4 @@
246 group_commit.patch
247 warning_fixes.patch
248 bug917246.patch
249+bug54330.patch
