Merge lp:~percona-dev/percona-server/5.5.13-fix_bug798371 into lp:percona-server/5.5
- 5.5.13-fix_bug798371
- Merge into 5.5
Proposed by
Yasufumi Kinoshita
Status: | Merged |
---|---|
Approved by: | Stewart Smith |
Approved revision: | no longer in the source branch. |
Merged at revision: | 142 |
Proposed branch: | lp:~percona-dev/percona-server/5.5.13-fix_bug798371 |
Merge into: | lp:percona-server/5.5 |
Diff against target: |
1151 lines (+949/-32) 3 files modified
innodb_adaptive_hash_index_partitions.patch (+36/-7) innodb_buffer_pool_pages_i_s.patch (+2/-2) innodb_fix_misc.patch (+911/-23) |
To merge this branch: | bzr merge lp:~percona-dev/percona-server/5.5.13-fix_bug798371 |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Stewart Smith | Pending | ||
Review via email: mp+66858@code.launchpad.net |
Commit message
Description of the change
Removing the zip_clean list is needed to keep the latch order, without losing performance, for innodb_lazy_drop_table.
The latch order introduced by split_buf_pool_mutex
doesn't allow calling buf_LRU_insert_zip_clean().
I don't want to make the change slower or less scalable.
So, I removed the zip_clean list as in 5.1. (Because it is completely removed, some UNIV_DEBUG code that depended on the zip_clean list had to be disabled.)
To post a comment you must log in.
Revision history for this message
Yasufumi Kinoshita (yasufumi-kinoshita) wrote : | # |
Revision history for this message
Yasufumi Kinoshita (yasufumi-kinoshita) wrote : | # |
I will merge this while porting 5.5.14, along with the other pending bug fixes.
Please test it at that time.
No need to review this.
Revision history for this message
Stewart Smith (stewart) wrote : | # |
Going to set to Approved (looks fine anyway) and you can just mark it as "merged" when we merge the 5.5.14 porting in.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'innodb_adaptive_hash_index_partitions.patch' |
2 | --- innodb_adaptive_hash_index_partitions.patch 2011-06-27 07:12:14 +0000 |
3 | +++ innodb_adaptive_hash_index_partitions.patch 2011-07-05 06:49:28 +0000 |
4 | @@ -972,7 +972,7 @@ |
5 | |
6 | block->is_hashed = FALSE; |
7 | |
8 | -@@ -1481,7 +1482,7 @@ |
9 | +@@ -1413,7 +1414,7 @@ |
10 | /* To follow the latching order, we |
11 | have to release btr_search_latch |
12 | before acquiring block->latch. */ |
13 | @@ -981,7 +981,7 @@ |
14 | /* When we release the search latch, |
15 | we must rescan all blocks, because |
16 | some may become hashed again. */ |
17 | -@@ -1512,11 +1513,11 @@ |
18 | +@@ -1444,11 +1445,11 @@ |
19 | anything. block->is_hashed can only |
20 | be set on uncompressed file pages. */ |
21 | |
22 | @@ -995,7 +995,7 @@ |
23 | |
24 | ut_ad(!btr_search_enabled); |
25 | } |
26 | -@@ -1535,7 +1536,11 @@ |
27 | +@@ -1467,7 +1468,11 @@ |
28 | ibool released_search_latch; |
29 | |
30 | #ifdef UNIV_SYNC_DEBUG |
31 | @@ -1008,7 +1008,7 @@ |
32 | #endif /* UNIV_SYNC_DEBUG */ |
33 | ut_ad(!btr_search_enabled); |
34 | |
35 | -@@ -2655,6 +2660,7 @@ |
36 | +@@ -2203,6 +2208,7 @@ |
37 | { |
38 | block->check_index_page_at_flush = FALSE; |
39 | block->index = NULL; |
40 | @@ -1019,7 +1019,36 @@ |
41 | diff -ruN a/storage/innobase/buf/buf0lru.c b/storage/innobase/buf/buf0lru.c |
42 | --- a/storage/innobase/buf/buf0lru.c 2010-12-04 15:35:29.137347521 +0900 |
43 | +++ b/storage/innobase/buf/buf0lru.c 2010-12-04 16:12:48.658550840 +0900 |
44 | -@@ -1798,7 +1798,7 @@ |
45 | +@@ -605,7 +605,7 @@ |
46 | + |
47 | + mutex_exit(&buf_pool->LRU_list_mutex); |
48 | + |
49 | +- rw_lock_s_lock(&btr_search_latch); |
50 | ++ btr_search_s_lock_all(); |
51 | + chunk = buf_pool->chunks; |
52 | + for (j = buf_pool->n_chunks; j--; chunk++) { |
53 | + buf_block_t* block = chunk->blocks; |
54 | +@@ -617,16 +617,16 @@ |
55 | + continue; |
56 | + } |
57 | + |
58 | +- rw_lock_s_unlock(&btr_search_latch); |
59 | ++ btr_search_s_unlock_all(); |
60 | + |
61 | + rw_lock_x_lock(&block->lock); |
62 | + btr_search_drop_page_hash_index(block, NULL); |
63 | + rw_lock_x_unlock(&block->lock); |
64 | + |
65 | +- rw_lock_s_lock(&btr_search_latch); |
66 | ++ btr_search_s_lock_all(); |
67 | + } |
68 | + } |
69 | +- rw_lock_s_unlock(&btr_search_latch); |
70 | ++ btr_search_s_unlock_all(); |
71 | + } |
72 | + } |
73 | + |
74 | +@@ -1787,7 +1787,7 @@ |
75 | |
76 | UNIV_MEM_VALID(((buf_block_t*) bpage)->frame, |
77 | UNIV_PAGE_SIZE); |
78 | @@ -1264,7 +1293,7 @@ |
79 | diff -ruN a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h |
80 | --- a/storage/innobase/include/buf0buf.h 2010-12-15 19:00:07.713604580 +0900 |
81 | +++ b/storage/innobase/include/buf0buf.h 2010-12-15 20:58:03.546839883 +0900 |
82 | -@@ -1546,7 +1546,7 @@ |
83 | +@@ -1540,7 +1540,7 @@ |
84 | pointers in the adaptive hash index |
85 | pointing to this frame */ |
86 | #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */ |
87 | @@ -1273,7 +1302,7 @@ |
88 | already been built on this |
89 | page; note that it does not |
90 | guarantee that the index is |
91 | -@@ -1560,6 +1560,7 @@ |
92 | +@@ -1554,6 +1554,7 @@ |
93 | unsigned curr_left_side:1;/*!< TRUE or FALSE in hash indexing */ |
94 | dict_index_t* index; /*!< Index for which the adaptive |
95 | hash index has been created. */ |
96 | |
97 | === modified file 'innodb_buffer_pool_pages_i_s.patch' |
98 | --- innodb_buffer_pool_pages_i_s.patch 2011-06-27 07:12:14 +0000 |
99 | +++ innodb_buffer_pool_pages_i_s.patch 2011-07-05 06:49:28 +0000 |
100 | @@ -8,7 +8,7 @@ |
101 | diff -ruN a/storage/innobase/buf/buf0buf.c b/storage/innobase/buf/buf0buf.c |
102 | --- a/storage/innobase/buf/buf0buf.c 2010-12-04 20:20:44.595483291 +0900 |
103 | +++ b/storage/innobase/buf/buf0buf.c 2010-12-06 19:28:04.055227506 +0900 |
104 | -@@ -4560,6 +4560,36 @@ |
105 | +@@ -4159,6 +4159,36 @@ |
106 | mutex_exit(block_mutex); |
107 | } |
108 | |
109 | @@ -786,7 +786,7 @@ |
110 | diff -ruN a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h |
111 | --- a/storage/innobase/include/buf0buf.h 2010-12-04 19:46:40.197471531 +0900 |
112 | +++ b/storage/innobase/include/buf0buf.h 2010-12-06 19:23:47.638195824 +0900 |
113 | -@@ -1144,6 +1144,14 @@ |
114 | +@@ -1138,6 +1138,14 @@ |
115 | /*===========*/ |
116 | const buf_pool_t* buf_pool) /*!< in: buffer pool */ |
117 | __attribute__((nonnull, const)); |
118 | |
119 | === modified file 'innodb_fix_misc.patch' |
120 | --- innodb_fix_misc.patch 2011-06-29 09:32:51 +0000 |
121 | +++ innodb_fix_misc.patch 2011-07-05 06:49:28 +0000 |
122 | @@ -5,15 +5,639 @@ |
123 | # Bug fix for |
124 | # http://bugs.mysql.com/56433 (always: because good for all users, and safe) |
125 | # and http://bugs.mysql.com/51325 (optional: innodb_lazy_drop_table) |
126 | +# and http://bugs.mysql.com/61341 (needed for innodb_lazy_drop_table to remove buf_LRU_insert_zip_clean()) |
127 | # were added. They may be removed in the future when will be fixed officially. |
128 | # |
129 | #!!! notice !!! |
130 | # Any small change to this file in the main branch |
131 | # should be done or reviewed by the maintainer! |
132 | +diff -ruN a/storage/innobase/buf/buf0buddy.c b/storage/innobase/buf/buf0buddy.c |
133 | +--- a/storage/innobase/buf/buf0buddy.c 2011-07-05 15:15:38.473447294 +0900 |
134 | ++++ b/storage/innobase/buf/buf0buddy.c 2011-07-05 15:16:09.258444327 +0900 |
135 | +@@ -378,7 +378,6 @@ |
136 | + buf_page_t* bpage, /*!< in: block to relocate */ |
137 | + buf_page_t* dpage) /*!< in: free block to relocate to */ |
138 | + { |
139 | +- buf_page_t* b; |
140 | + buf_pool_t* buf_pool = buf_pool_from_bpage(bpage); |
141 | + |
142 | + //ut_ad(buf_pool_mutex_own(buf_pool)); |
143 | +@@ -421,16 +420,6 @@ |
144 | + buf_relocate(bpage, dpage); |
145 | + ut_d(bpage->state = BUF_BLOCK_ZIP_FREE); |
146 | + |
147 | +- /* relocate buf_pool->zip_clean */ |
148 | +- b = UT_LIST_GET_PREV(zip_list, dpage); |
149 | +- UT_LIST_REMOVE(zip_list, buf_pool->zip_clean, dpage); |
150 | +- |
151 | +- if (b) { |
152 | +- UT_LIST_INSERT_AFTER(zip_list, buf_pool->zip_clean, b, dpage); |
153 | +- } else { |
154 | +- UT_LIST_ADD_FIRST(zip_list, buf_pool->zip_clean, dpage); |
155 | +- } |
156 | +- |
157 | + UNIV_MEM_INVALID(bpage, sizeof *bpage); |
158 | + |
159 | + mutex_exit(&buf_pool->zip_mutex); |
160 | diff -ruN a/storage/innobase/buf/buf0buf.c b/storage/innobase/buf/buf0buf.c |
161 | --- a/storage/innobase/buf/buf0buf.c 2011-02-23 19:00:48.178696354 +0900 |
162 | +++ b/storage/innobase/buf/buf0buf.c 2011-02-23 19:01:19.138826278 +0900 |
163 | -@@ -4084,6 +4084,7 @@ |
164 | +@@ -1189,72 +1189,6 @@ |
165 | + return(NULL); |
166 | + } |
167 | + |
168 | +-/*********************************************************************//** |
169 | +-Checks that all blocks in the buffer chunk are in BUF_BLOCK_NOT_USED state. |
170 | +-@return TRUE if all freed */ |
171 | +-static |
172 | +-ibool |
173 | +-buf_chunk_all_free( |
174 | +-/*===============*/ |
175 | +- const buf_chunk_t* chunk) /*!< in: chunk being checked */ |
176 | +-{ |
177 | +- const buf_block_t* block; |
178 | +- ulint i; |
179 | +- |
180 | +- block = chunk->blocks; |
181 | +- |
182 | +- for (i = chunk->size; i--; block++) { |
183 | +- |
184 | +- if (buf_block_get_state(block) != BUF_BLOCK_NOT_USED) { |
185 | +- |
186 | +- return(FALSE); |
187 | +- } |
188 | +- } |
189 | +- |
190 | +- return(TRUE); |
191 | +-} |
192 | +- |
193 | +-/********************************************************************//** |
194 | +-Frees a chunk of buffer frames. */ |
195 | +-static |
196 | +-void |
197 | +-buf_chunk_free( |
198 | +-/*===========*/ |
199 | +- buf_pool_t* buf_pool, /*!< in: buffer pool instance */ |
200 | +- buf_chunk_t* chunk) /*!< out: chunk of buffers */ |
201 | +-{ |
202 | +- buf_block_t* block; |
203 | +- const buf_block_t* block_end; |
204 | +- |
205 | +- //ut_ad(buf_pool_mutex_own(buf_pool)); /* but we need all mutex here */ |
206 | +- |
207 | +- block_end = chunk->blocks + chunk->size; |
208 | +- |
209 | +- for (block = chunk->blocks; block < block_end; block++) { |
210 | +- ut_a(buf_block_get_state(block) == BUF_BLOCK_NOT_USED); |
211 | +- ut_a(!block->page.zip.data); |
212 | +- |
213 | +- ut_ad(!block->page.in_LRU_list); |
214 | +- ut_ad(!block->in_unzip_LRU_list); |
215 | +- ut_ad(!block->page.in_flush_list); |
216 | +- /* Remove the block from the free list. */ |
217 | +- mutex_enter(&buf_pool->free_list_mutex); |
218 | +- ut_ad(block->page.in_free_list); |
219 | +- UT_LIST_REMOVE(free, buf_pool->free, (&block->page)); |
220 | +- mutex_exit(&buf_pool->free_list_mutex); |
221 | +- |
222 | +- /* Free the latches. */ |
223 | +- mutex_free(&block->mutex); |
224 | +- rw_lock_free(&block->lock); |
225 | +-#ifdef UNIV_SYNC_DEBUG |
226 | +- rw_lock_free(&block->debug_latch); |
227 | +-#endif /* UNIV_SYNC_DEBUG */ |
228 | +- UNIV_MEM_UNDESC(block); |
229 | +- } |
230 | +- |
231 | +- os_mem_free_large(chunk->mem, chunk->mem_size); |
232 | +-} |
233 | +- |
234 | + /********************************************************************//** |
235 | + Set buffer pool size variables after resizing it */ |
236 | + static |
237 | +@@ -1380,8 +1314,6 @@ |
238 | + chunk = chunks + buf_pool->n_chunks; |
239 | + |
240 | + while (--chunk >= chunks) { |
241 | +- /* Bypass the checks of buf_chunk_free(), since they |
242 | +- would fail at shutdown. */ |
243 | + os_mem_free_large(chunk->mem, chunk->mem_size); |
244 | + } |
245 | + |
246 | +@@ -1644,290 +1576,6 @@ |
247 | + HASH_INSERT(buf_page_t, hash, buf_pool->page_hash, fold, dpage); |
248 | + } |
249 | + |
250 | +-/********************************************************************//** |
251 | +-Shrinks a buffer pool instance. */ |
252 | +-static |
253 | +-void |
254 | +-buf_pool_shrink_instance( |
255 | +-/*=====================*/ |
256 | +- buf_pool_t* buf_pool, /*!< in: buffer pool instance */ |
257 | +- ulint chunk_size) /*!< in: number of pages to remove */ |
258 | +-{ |
259 | +- buf_chunk_t* chunks; |
260 | +- buf_chunk_t* chunk; |
261 | +- ulint max_size; |
262 | +- ulint max_free_size; |
263 | +- buf_chunk_t* max_chunk; |
264 | +- buf_chunk_t* max_free_chunk; |
265 | +- |
266 | +- ut_ad(!buf_pool_mutex_own(buf_pool)); |
267 | +- |
268 | +-try_again: |
269 | +- btr_search_disable(); /* Empty the adaptive hash index again */ |
270 | +- //buf_pool_mutex_enter(buf_pool); |
271 | +- mutex_enter(&buf_pool->LRU_list_mutex); |
272 | +- |
273 | +-shrink_again: |
274 | +- if (buf_pool->n_chunks <= 1) { |
275 | +- |
276 | +- /* Cannot shrink if there is only one chunk */ |
277 | +- goto func_done; |
278 | +- } |
279 | +- |
280 | +- /* Search for the largest free chunk |
281 | +- not larger than the size difference */ |
282 | +- chunks = buf_pool->chunks; |
283 | +- chunk = chunks + buf_pool->n_chunks; |
284 | +- max_size = max_free_size = 0; |
285 | +- max_chunk = max_free_chunk = NULL; |
286 | +- |
287 | +- while (--chunk >= chunks) { |
288 | +- if (chunk->size <= chunk_size |
289 | +- && chunk->size > max_free_size) { |
290 | +- if (chunk->size > max_size) { |
291 | +- max_size = chunk->size; |
292 | +- max_chunk = chunk; |
293 | +- } |
294 | +- |
295 | +- if (buf_chunk_all_free(chunk)) { |
296 | +- max_free_size = chunk->size; |
297 | +- max_free_chunk = chunk; |
298 | +- } |
299 | +- } |
300 | +- } |
301 | +- |
302 | +- if (!max_free_size) { |
303 | +- |
304 | +- ulint dirty = 0; |
305 | +- ulint nonfree = 0; |
306 | +- buf_block_t* block; |
307 | +- buf_block_t* bend; |
308 | +- |
309 | +- /* Cannot shrink: try again later |
310 | +- (do not assign srv_buf_pool_old_size) */ |
311 | +- if (!max_chunk) { |
312 | +- |
313 | +- goto func_exit; |
314 | +- } |
315 | +- |
316 | +- block = max_chunk->blocks; |
317 | +- bend = block + max_chunk->size; |
318 | +- |
319 | +- /* Move the blocks of chunk to the end of the |
320 | +- LRU list and try to flush them. */ |
321 | +- for (; block < bend; block++) { |
322 | +- switch (buf_block_get_state(block)) { |
323 | +- case BUF_BLOCK_NOT_USED: |
324 | +- continue; |
325 | +- case BUF_BLOCK_FILE_PAGE: |
326 | +- break; |
327 | +- default: |
328 | +- nonfree++; |
329 | +- continue; |
330 | +- } |
331 | +- |
332 | +- mutex_enter(&block->mutex); |
333 | +- /* The following calls will temporarily |
334 | +- release block->mutex and buf_pool->mutex. |
335 | +- Therefore, we have to always retry, |
336 | +- even if !dirty && !nonfree. */ |
337 | +- |
338 | +- if (!buf_flush_ready_for_replace(&block->page)) { |
339 | +- |
340 | +- buf_LRU_make_block_old(&block->page); |
341 | +- dirty++; |
342 | +- } else if (buf_LRU_free_block(&block->page, TRUE, TRUE) |
343 | +- != BUF_LRU_FREED) { |
344 | +- nonfree++; |
345 | +- } |
346 | +- |
347 | +- mutex_exit(&block->mutex); |
348 | +- } |
349 | +- |
350 | +- //buf_pool_mutex_exit(buf_pool); |
351 | +- mutex_exit(&buf_pool->LRU_list_mutex); |
352 | +- |
353 | +- /* Request for a flush of the chunk if it helps. |
354 | +- Do not flush if there are non-free blocks, since |
355 | +- flushing will not make the chunk freeable. */ |
356 | +- if (nonfree) { |
357 | +- /* Avoid busy-waiting. */ |
358 | +- os_thread_sleep(100000); |
359 | +- } else if (dirty |
360 | +- && buf_flush_LRU(buf_pool, dirty) |
361 | +- == ULINT_UNDEFINED) { |
362 | +- |
363 | +- buf_flush_wait_batch_end(buf_pool, BUF_FLUSH_LRU); |
364 | +- } |
365 | +- |
366 | +- goto try_again; |
367 | +- } |
368 | +- |
369 | +- max_size = max_free_size; |
370 | +- max_chunk = max_free_chunk; |
371 | +- |
372 | +- buf_pool->old_pool_size = buf_pool->curr_pool_size; |
373 | +- |
374 | +- /* Rewrite buf_pool->chunks. Copy everything but max_chunk. */ |
375 | +- chunks = mem_alloc((buf_pool->n_chunks - 1) * sizeof *chunks); |
376 | +- memcpy(chunks, buf_pool->chunks, |
377 | +- (max_chunk - buf_pool->chunks) * sizeof *chunks); |
378 | +- memcpy(chunks + (max_chunk - buf_pool->chunks), |
379 | +- max_chunk + 1, |
380 | +- buf_pool->chunks + buf_pool->n_chunks |
381 | +- - (max_chunk + 1)); |
382 | +- ut_a(buf_pool->curr_size > max_chunk->size); |
383 | +- buf_pool->curr_size -= max_chunk->size; |
384 | +- buf_pool->curr_pool_size = buf_pool->curr_size * UNIV_PAGE_SIZE; |
385 | +- chunk_size -= max_chunk->size; |
386 | +- buf_chunk_free(buf_pool, max_chunk); |
387 | +- mem_free(buf_pool->chunks); |
388 | +- buf_pool->chunks = chunks; |
389 | +- buf_pool->n_chunks--; |
390 | +- |
391 | +- /* Allow a slack of one megabyte. */ |
392 | +- if (chunk_size > 1048576 / UNIV_PAGE_SIZE) { |
393 | +- |
394 | +- goto shrink_again; |
395 | +- } |
396 | +- goto func_exit; |
397 | +- |
398 | +-func_done: |
399 | +- buf_pool->old_pool_size = buf_pool->curr_pool_size; |
400 | +-func_exit: |
401 | +- //buf_pool_mutex_exit(buf_pool); |
402 | +- mutex_exit(&buf_pool->LRU_list_mutex); |
403 | +- btr_search_enable(); |
404 | +-} |
405 | +- |
406 | +-/********************************************************************//** |
407 | +-Shrinks the buffer pool. */ |
408 | +-static |
409 | +-void |
410 | +-buf_pool_shrink( |
411 | +-/*============*/ |
412 | +- ulint chunk_size) /*!< in: number of pages to remove */ |
413 | +-{ |
414 | +- ulint i; |
415 | +- |
416 | +- for (i = 0; i < srv_buf_pool_instances; i++) { |
417 | +- buf_pool_t* buf_pool; |
418 | +- ulint instance_chunk_size; |
419 | +- |
420 | +- instance_chunk_size = chunk_size / srv_buf_pool_instances; |
421 | +- buf_pool = buf_pool_from_array(i); |
422 | +- buf_pool_shrink_instance(buf_pool, instance_chunk_size); |
423 | +- } |
424 | +- |
425 | +- buf_pool_set_sizes(); |
426 | +-} |
427 | +- |
428 | +-/********************************************************************//** |
429 | +-Rebuild buf_pool->page_hash for a buffer pool instance. */ |
430 | +-static |
431 | +-void |
432 | +-buf_pool_page_hash_rebuild_instance( |
433 | +-/*================================*/ |
434 | +- buf_pool_t* buf_pool) /*!< in: buffer pool instance */ |
435 | +-{ |
436 | +- ulint i; |
437 | +- buf_page_t* b; |
438 | +- buf_chunk_t* chunk; |
439 | +- ulint n_chunks; |
440 | +- hash_table_t* zip_hash; |
441 | +- hash_table_t* page_hash; |
442 | +- |
443 | +- //buf_pool_mutex_enter(buf_pool); |
444 | +- mutex_enter(&buf_pool->LRU_list_mutex); |
445 | +- rw_lock_x_lock(&buf_pool->page_hash_latch); |
446 | +- |
447 | +- /* Free, create, and populate the hash table. */ |
448 | +- hash_table_free(buf_pool->page_hash); |
449 | +- buf_pool->page_hash = page_hash = hash_create(2 * buf_pool->curr_size); |
450 | +- zip_hash = hash_create(2 * buf_pool->curr_size); |
451 | +- |
452 | +- HASH_MIGRATE(buf_pool->zip_hash, zip_hash, buf_page_t, hash, |
453 | +- BUF_POOL_ZIP_FOLD_BPAGE); |
454 | +- |
455 | +- hash_table_free(buf_pool->zip_hash); |
456 | +- buf_pool->zip_hash = zip_hash; |
457 | +- |
458 | +- /* Insert the uncompressed file pages to buf_pool->page_hash. */ |
459 | +- |
460 | +- chunk = buf_pool->chunks; |
461 | +- n_chunks = buf_pool->n_chunks; |
462 | +- |
463 | +- for (i = 0; i < n_chunks; i++, chunk++) { |
464 | +- ulint j; |
465 | +- buf_block_t* block = chunk->blocks; |
466 | +- |
467 | +- for (j = 0; j < chunk->size; j++, block++) { |
468 | +- if (buf_block_get_state(block) |
469 | +- == BUF_BLOCK_FILE_PAGE) { |
470 | +- ut_ad(!block->page.in_zip_hash); |
471 | +- ut_ad(block->page.in_page_hash); |
472 | +- |
473 | +- HASH_INSERT(buf_page_t, hash, page_hash, |
474 | +- buf_page_address_fold( |
475 | +- block->page.space, |
476 | +- block->page.offset), |
477 | +- &block->page); |
478 | +- } |
479 | +- } |
480 | +- } |
481 | +- |
482 | +- /* Insert the compressed-only pages to buf_pool->page_hash. |
483 | +- All such blocks are either in buf_pool->zip_clean or |
484 | +- in buf_pool->flush_list. */ |
485 | +- |
486 | +- mutex_enter(&buf_pool->zip_mutex); |
487 | +- for (b = UT_LIST_GET_FIRST(buf_pool->zip_clean); b; |
488 | +- b = UT_LIST_GET_NEXT(zip_list, b)) { |
489 | +- ut_a(buf_page_get_state(b) == BUF_BLOCK_ZIP_PAGE); |
490 | +- ut_ad(!b->in_flush_list); |
491 | +- ut_ad(b->in_LRU_list); |
492 | +- ut_ad(b->in_page_hash); |
493 | +- ut_ad(!b->in_zip_hash); |
494 | +- |
495 | +- HASH_INSERT(buf_page_t, hash, page_hash, |
496 | +- buf_page_address_fold(b->space, b->offset), b); |
497 | +- } |
498 | +- mutex_exit(&buf_pool->zip_mutex); |
499 | +- |
500 | +- buf_flush_list_mutex_enter(buf_pool); |
501 | +- for (b = UT_LIST_GET_FIRST(buf_pool->flush_list); b; |
502 | +- b = UT_LIST_GET_NEXT(flush_list, b)) { |
503 | +- ut_ad(b->in_flush_list); |
504 | +- ut_ad(b->in_LRU_list); |
505 | +- ut_ad(b->in_page_hash); |
506 | +- ut_ad(!b->in_zip_hash); |
507 | +- |
508 | +- switch (buf_page_get_state(b)) { |
509 | +- case BUF_BLOCK_ZIP_DIRTY: |
510 | +- HASH_INSERT(buf_page_t, hash, page_hash, |
511 | +- buf_page_address_fold(b->space, |
512 | +- b->offset), b); |
513 | +- break; |
514 | +- case BUF_BLOCK_FILE_PAGE: |
515 | +- /* uncompressed page */ |
516 | +- break; |
517 | +- case BUF_BLOCK_ZIP_FREE: |
518 | +- case BUF_BLOCK_ZIP_PAGE: |
519 | +- case BUF_BLOCK_NOT_USED: |
520 | +- case BUF_BLOCK_READY_FOR_USE: |
521 | +- case BUF_BLOCK_MEMORY: |
522 | +- case BUF_BLOCK_REMOVE_HASH: |
523 | +- ut_error; |
524 | +- break; |
525 | +- } |
526 | +- } |
527 | +- |
528 | +- buf_flush_list_mutex_exit(buf_pool); |
529 | +- //buf_pool_mutex_exit(buf_pool); |
530 | +- mutex_exit(&buf_pool->LRU_list_mutex); |
531 | +- rw_lock_x_unlock(&buf_pool->page_hash_latch); |
532 | +-} |
533 | +- |
534 | + /******************************************************************** |
535 | + Determine if a block is a sentinel for a buffer pool watch. |
536 | + @return TRUE if a sentinel for a buffer pool watch, FALSE if not */ |
537 | +@@ -2049,127 +1697,6 @@ |
538 | + return(NULL); |
539 | + } |
540 | + |
541 | +-/********************************************************************//** |
542 | +-Rebuild buf_pool->page_hash. */ |
543 | +-static |
544 | +-void |
545 | +-buf_pool_page_hash_rebuild(void) |
546 | +-/*============================*/ |
547 | +-{ |
548 | +- ulint i; |
549 | +- |
550 | +- for (i = 0; i < srv_buf_pool_instances; i++) { |
551 | +- buf_pool_page_hash_rebuild_instance(buf_pool_from_array(i)); |
552 | +- } |
553 | +-} |
554 | +- |
555 | +-/********************************************************************//** |
556 | +-Increase the buffer pool size of one buffer pool instance. */ |
557 | +-static |
558 | +-void |
559 | +-buf_pool_increase_instance( |
560 | +-/*=======================*/ |
561 | +- buf_pool_t* buf_pool, /*!< in: buffer pool instane */ |
562 | +- ulint change_size) /*!< in: new size of the pool */ |
563 | +-{ |
564 | +- buf_chunk_t* chunks; |
565 | +- buf_chunk_t* chunk; |
566 | +- |
567 | +- mutex_enter(&buf_pool->LRU_list_mutex); |
568 | +- rw_lock_x_lock(&buf_pool->page_hash_latch); |
569 | +- buf_pool_mutex_enter(buf_pool); |
570 | +- chunks = mem_alloc((buf_pool->n_chunks + 1) * sizeof *chunks); |
571 | +- |
572 | +- memcpy(chunks, buf_pool->chunks, buf_pool->n_chunks * sizeof *chunks); |
573 | +- |
574 | +- chunk = &chunks[buf_pool->n_chunks]; |
575 | +- |
576 | +- if (!buf_chunk_init(buf_pool, chunk, change_size)) { |
577 | +- mem_free(chunks); |
578 | +- } else { |
579 | +- buf_pool->old_pool_size = buf_pool->curr_pool_size; |
580 | +- buf_pool->curr_size += chunk->size; |
581 | +- buf_pool->curr_pool_size = buf_pool->curr_size * UNIV_PAGE_SIZE; |
582 | +- mem_free(buf_pool->chunks); |
583 | +- buf_pool->chunks = chunks; |
584 | +- buf_pool->n_chunks++; |
585 | +- } |
586 | +- |
587 | +- mutex_exit(&buf_pool->LRU_list_mutex); |
588 | +- rw_lock_x_unlock(&buf_pool->page_hash_latch); |
589 | +- buf_pool_mutex_exit(buf_pool); |
590 | +-} |
591 | +- |
592 | +-/********************************************************************//** |
593 | +-Increase the buffer pool size. */ |
594 | +-static |
595 | +-void |
596 | +-buf_pool_increase( |
597 | +-/*==============*/ |
598 | +- ulint change_size) |
599 | +-{ |
600 | +- ulint i; |
601 | +- |
602 | +- for (i = 0; i < srv_buf_pool_instances; i++) { |
603 | +- buf_pool_increase_instance( |
604 | +- buf_pool_from_array(i), |
605 | +- change_size / srv_buf_pool_instances); |
606 | +- } |
607 | +- |
608 | +- buf_pool_set_sizes(); |
609 | +-} |
610 | +- |
611 | +-/********************************************************************//** |
612 | +-Resizes the buffer pool. */ |
613 | +-UNIV_INTERN |
614 | +-void |
615 | +-buf_pool_resize(void) |
616 | +-/*=================*/ |
617 | +-{ |
618 | +- ulint change_size; |
619 | +- ulint min_change_size = 1048576 * srv_buf_pool_instances; |
620 | +- |
621 | +- buf_pool_mutex_enter_all(); |
622 | +- |
623 | +- if (srv_buf_pool_old_size == srv_buf_pool_size) { |
624 | +- |
625 | +- buf_pool_mutex_exit_all(); |
626 | +- |
627 | +- return; |
628 | +- |
629 | +- } else if (srv_buf_pool_curr_size + min_change_size |
630 | +- > srv_buf_pool_size) { |
631 | +- |
632 | +- change_size = (srv_buf_pool_curr_size - srv_buf_pool_size) |
633 | +- / UNIV_PAGE_SIZE; |
634 | +- |
635 | +- buf_pool_mutex_exit_all(); |
636 | +- |
637 | +- /* Disable adaptive hash indexes and empty the index |
638 | +- in order to free up memory in the buffer pool chunks. */ |
639 | +- buf_pool_shrink(change_size); |
640 | +- |
641 | +- } else if (srv_buf_pool_curr_size + min_change_size |
642 | +- < srv_buf_pool_size) { |
643 | +- |
644 | +- /* Enlarge the buffer pool by at least one megabyte */ |
645 | +- |
646 | +- change_size = srv_buf_pool_size - srv_buf_pool_curr_size; |
647 | +- |
648 | +- buf_pool_mutex_exit_all(); |
649 | +- |
650 | +- buf_pool_increase(change_size); |
651 | +- } else { |
652 | +- srv_buf_pool_size = srv_buf_pool_old_size; |
653 | +- |
654 | +- buf_pool_mutex_exit_all(); |
655 | +- |
656 | +- return; |
657 | +- } |
658 | +- |
659 | +- buf_pool_page_hash_rebuild(); |
660 | +-} |
661 | +- |
662 | + /****************************************************************//** |
663 | + Remove the sentinel block for the watch before replacing it with a real block. |
664 | + buf_page_watch_clear() or buf_page_watch_occurred() will notice that |
665 | +@@ -2513,6 +2040,27 @@ |
666 | + #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ |
667 | + } |
668 | + |
669 | ++ if (UNIV_UNLIKELY(bpage->space_was_being_deleted)) { |
670 | ++ /* This page is obsoleted, should discard and retry */ |
671 | ++ rw_lock_s_unlock(&buf_pool->page_hash_latch); |
672 | ++ |
673 | ++ mutex_enter(&buf_pool->LRU_list_mutex); |
674 | ++ block_mutex = buf_page_get_mutex_enter(bpage); |
675 | ++ |
676 | ++ if (UNIV_UNLIKELY(!block_mutex)) { |
677 | ++ mutex_exit(&buf_pool->LRU_list_mutex); |
678 | ++ goto lookup; |
679 | ++ } |
680 | ++ |
681 | ++ buf_LRU_free_block(bpage, TRUE, TRUE); |
682 | ++ |
683 | ++ mutex_exit(&buf_pool->LRU_list_mutex); |
684 | ++ mutex_exit(block_mutex); |
685 | ++ block_mutex = NULL; |
686 | ++ |
687 | ++ goto lookup; |
688 | ++ } |
689 | ++ |
690 | + if (UNIV_UNLIKELY(!bpage->zip.data)) { |
691 | + /* There is no compressed page. */ |
692 | + err_exit: |
693 | +@@ -3025,6 +2573,27 @@ |
694 | + block = (buf_block_t*) buf_page_hash_get_low( |
695 | + buf_pool, space, offset, fold); |
696 | + if (block) { |
697 | ++ if (UNIV_UNLIKELY(block->page.space_was_being_deleted)) { |
698 | ++ /* This page is obsoleted, should discard and retry */ |
699 | ++ rw_lock_s_unlock(&buf_pool->page_hash_latch); |
700 | ++ |
701 | ++ mutex_enter(&buf_pool->LRU_list_mutex); |
702 | ++ block_mutex = buf_page_get_mutex_enter((buf_page_t*)block); |
703 | ++ |
704 | ++ if (UNIV_UNLIKELY(!block_mutex)) { |
705 | ++ mutex_exit(&buf_pool->LRU_list_mutex); |
706 | ++ goto loop; |
707 | ++ } |
708 | ++ |
709 | ++ buf_LRU_free_block((buf_page_t*)block, TRUE, TRUE); |
710 | ++ |
711 | ++ mutex_exit(&buf_pool->LRU_list_mutex); |
712 | ++ mutex_exit(block_mutex); |
713 | ++ block_mutex = NULL; |
714 | ++ |
715 | ++ goto loop; |
716 | ++ } |
717 | ++ |
718 | + block_mutex = buf_page_get_mutex_enter((buf_page_t*)block); |
719 | + ut_a(block_mutex); |
720 | + } |
721 | +@@ -3224,8 +2793,6 @@ |
722 | + |
723 | + if (buf_page_get_state(&block->page) |
724 | + == BUF_BLOCK_ZIP_PAGE) { |
725 | +- UT_LIST_REMOVE(zip_list, buf_pool->zip_clean, |
726 | +- &block->page); |
727 | + ut_ad(!block->page.in_flush_list); |
728 | + } else { |
729 | + /* Relocate buf_pool->flush_list. */ |
730 | +@@ -3943,11 +3510,28 @@ |
731 | + |
732 | + fold = buf_page_address_fold(space, offset); |
733 | + |
734 | ++retry: |
735 | + //buf_pool_mutex_enter(buf_pool); |
736 | + mutex_enter(&buf_pool->LRU_list_mutex); |
737 | + rw_lock_x_lock(&buf_pool->page_hash_latch); |
738 | + |
739 | + watch_page = buf_page_hash_get_low(buf_pool, space, offset, fold); |
740 | ++ |
741 | ++ if (UNIV_UNLIKELY(watch_page && watch_page->space_was_being_deleted)) { |
742 | ++ mutex_t* block_mutex = buf_page_get_mutex_enter(watch_page); |
743 | ++ |
744 | ++ /* This page is obsoleted, should discard and retry */ |
745 | ++ rw_lock_x_unlock(&buf_pool->page_hash_latch); |
746 | ++ ut_a(block_mutex); |
747 | ++ |
748 | ++ buf_LRU_free_block(watch_page, TRUE, TRUE); |
749 | ++ |
750 | ++ mutex_exit(&buf_pool->LRU_list_mutex); |
751 | ++ mutex_exit(block_mutex); |
752 | ++ |
753 | ++ goto retry; |
754 | ++ } |
755 | ++ |
756 | + if (watch_page && !buf_pool_watch_is_sentinel(buf_pool, watch_page)) { |
757 | + /* The page is already in the buffer pool. */ |
758 | + watch_page = NULL; |
759 | +@@ -4084,6 +3668,7 @@ |
760 | bpage->state = BUF_BLOCK_ZIP_PAGE; |
761 | bpage->space = space; |
762 | bpage->offset = offset; |
763 | @@ -21,6 +645,121 @@ |
764 | |
765 | |
766 | #ifdef UNIV_DEBUG |
767 | +@@ -4112,7 +3697,6 @@ |
768 | + |
769 | + /* The block must be put to the LRU list, to the old blocks */ |
770 | + buf_LRU_add_block(bpage, TRUE/* to old blocks */); |
771 | +- buf_LRU_insert_zip_clean(bpage); |
772 | + |
773 | + mutex_exit(&buf_pool->LRU_list_mutex); |
774 | + |
775 | +@@ -4167,6 +3751,7 @@ |
776 | + |
777 | + fold = buf_page_address_fold(space, offset); |
778 | + |
779 | ++retry: |
780 | + //buf_pool_mutex_enter(buf_pool); |
781 | + mutex_enter(&buf_pool->LRU_list_mutex); |
782 | + rw_lock_x_lock(&buf_pool->page_hash_latch); |
783 | +@@ -4174,6 +3759,21 @@ |
784 | + block = (buf_block_t*) buf_page_hash_get_low( |
785 | + buf_pool, space, offset, fold); |
786 | + |
787 | ++ if (UNIV_UNLIKELY(block && block->page.space_was_being_deleted)) { |
788 | ++ mutex_t* block_mutex = buf_page_get_mutex_enter((buf_page_t*)block); |
789 | ++ |
790 | ++ /* This page is obsoleted, should discard and retry */ |
791 | ++ rw_lock_x_unlock(&buf_pool->page_hash_latch); |
792 | ++ ut_a(block_mutex); |
793 | ++ |
794 | ++ buf_LRU_free_block((buf_page_t*)block, TRUE, TRUE); |
795 | ++ |
796 | ++ mutex_exit(&buf_pool->LRU_list_mutex); |
797 | ++ mutex_exit(block_mutex); |
798 | ++ |
799 | ++ goto retry; |
800 | ++ } |
801 | ++ |
802 | + if (block |
803 | + && buf_page_in_file(&block->page) |
804 | + && !buf_pool_watch_is_sentinel(buf_pool, &block->page)) { |
805 | +@@ -4465,9 +4065,9 @@ |
806 | + } |
807 | + |
808 | + if (io_type == BUF_IO_WRITE |
809 | +- && (buf_page_get_state(bpage) == BUF_BLOCK_ZIP_DIRTY |
810 | +- || buf_page_get_flush_type(bpage) == BUF_FLUSH_LRU)) { |
811 | +- /* to keep consistency at buf_LRU_insert_zip_clean() */ |
812 | ++ && (/* buf_page_get_state(bpage) == BUF_BLOCK_ZIP_DIRTY |
813 | ++ ||*/ buf_page_get_flush_type(bpage) == BUF_FLUSH_LRU)) { |
814 | ++ /* (REMOVED) to keep consistency at buf_LRU_insert_zip_clean() */ |
815 | + have_LRU_mutex = TRUE; /* optimistic */ |
816 | + } |
817 | + retry_mutex: |
818 | +@@ -4788,35 +4388,7 @@ |
819 | + |
820 | + mutex_enter(&buf_pool->zip_mutex); |
821 | + |
822 | +- /* Check clean compressed-only blocks. */ |
823 | +- |
824 | +- for (b = UT_LIST_GET_FIRST(buf_pool->zip_clean); b; |
825 | +- b = UT_LIST_GET_NEXT(zip_list, b)) { |
826 | +- ut_a(buf_page_get_state(b) == BUF_BLOCK_ZIP_PAGE); |
827 | +- switch (buf_page_get_io_fix(b)) { |
828 | +- case BUF_IO_NONE: |
829 | +- /* All clean blocks should be I/O-unfixed. */ |
830 | +- break; |
831 | +- case BUF_IO_READ: |
832 | +- /* In buf_LRU_free_block(), we temporarily set |
833 | +- b->io_fix = BUF_IO_READ for a newly allocated |
834 | +- control block in order to prevent |
835 | +- buf_page_get_gen() from decompressing the block. */ |
836 | +- break; |
837 | +- default: |
838 | +- ut_error; |
839 | +- break; |
840 | +- } |
841 | +- |
842 | +- /* It is OK to read oldest_modification here because |
843 | +- we have acquired buf_pool->zip_mutex above which acts |
844 | +- as the 'block->mutex' for these bpages. */ |
845 | +- ut_a(!b->oldest_modification); |
846 | +- ut_a(buf_page_hash_get(buf_pool, b->space, b->offset) == b); |
847 | +- |
848 | +- n_lru++; |
849 | +- n_zip++; |
850 | +- } |
851 | ++ /* Check clean compressed-only blocks. (zip_clean list was removed)*/ |
852 | + |
853 | + /* Check dirty blocks. */ |
854 | + |
855 | +@@ -4880,7 +4452,7 @@ |
856 | + ut_error; |
857 | + } |
858 | + |
859 | +- ut_a(UT_LIST_GET_LEN(buf_pool->LRU) == n_lru); |
860 | ++ //ut_a(UT_LIST_GET_LEN(buf_pool->LRU) == n_lru); /* zip_clean list was removed */ |
861 | + /* because of latching order with block->mutex, we cannot get needed mutexes before that */ |
862 | + /* |
863 | + if (UT_LIST_GET_LEN(buf_pool->free) != n_free) { |
864 | +@@ -5116,17 +4688,6 @@ |
865 | + |
866 | + /* Traverse the lists of clean and dirty compressed-only blocks. */ |
867 | + |
868 | +- for (b = UT_LIST_GET_FIRST(buf_pool->zip_clean); b; |
869 | +- b = UT_LIST_GET_NEXT(zip_list, b)) { |
870 | +- ut_a(buf_page_get_state(b) == BUF_BLOCK_ZIP_PAGE); |
871 | +- ut_a(buf_page_get_io_fix(b) != BUF_IO_WRITE); |
872 | +- |
873 | +- if (b->buf_fix_count != 0 |
874 | +- || buf_page_get_io_fix(b) != BUF_IO_NONE) { |
875 | +- fixed_pages_number++; |
876 | +- } |
877 | +- } |
878 | +- |
879 | + buf_flush_list_mutex_enter(buf_pool); |
880 | + for (b = UT_LIST_GET_FIRST(buf_pool->flush_list); b; |
881 | + b = UT_LIST_GET_NEXT(flush_list, b)) { |
882 | diff -ruN a/storage/innobase/buf/buf0flu.c b/storage/innobase/buf/buf0flu.c |
883 | --- a/storage/innobase/buf/buf0flu.c 2011-02-23 19:00:48.182659256 +0900 |
884 | +++ b/storage/innobase/buf/buf0flu.c 2011-02-23 19:01:19.138826278 +0900 |
885 | @@ -47,31 +786,66 @@ |
886 | if (flush_type != BUF_FLUSH_LRU) { |
887 | |
888 | return(TRUE); |
889 | +@@ -527,7 +534,6 @@ |
890 | + case BUF_BLOCK_ZIP_DIRTY: |
891 | + buf_page_set_state(bpage, BUF_BLOCK_ZIP_PAGE); |
892 | + UT_LIST_REMOVE(flush_list, buf_pool->flush_list, bpage); |
893 | +- buf_LRU_insert_zip_clean(bpage); |
894 | + break; |
895 | + case BUF_BLOCK_FILE_PAGE: |
896 | + UT_LIST_REMOVE(flush_list, buf_pool->flush_list, bpage); |
897 | diff -ruN a/storage/innobase/buf/buf0lru.c b/storage/innobase/buf/buf0lru.c |
898 | --- a/storage/innobase/buf/buf0lru.c 2011-02-23 19:00:47.939695791 +0900 |
899 | +++ b/storage/innobase/buf/buf0lru.c 2011-02-23 19:01:19.142741970 +0900 |
900 | -@@ -574,6 +574,37 @@ |
901 | +@@ -574,38 +574,59 @@ |
902 | } |
903 | } |
904 | |
905 | +-/********************************************************************//** |
906 | +-Insert a compressed block into buf_pool->zip_clean in the LRU order. */ |
907 | +/******************************************************************//** |
908 | +*/ |
909 | -+UNIV_INTERN |
910 | -+void |
911 | + UNIV_INTERN |
912 | + void |
913 | +-buf_LRU_insert_zip_clean( |
914 | +-/*=====================*/ |
915 | +- buf_page_t* bpage) /*!< in: pointer to the block in question */ |
916 | +buf_LRU_mark_space_was_deleted( |
917 | +/*===========================*/ |
918 | + ulint id) /*!< in: space id */ |
919 | -+{ |
920 | + { |
921 | +- buf_page_t* b; |
922 | +- buf_pool_t* buf_pool = buf_pool_from_bpage(bpage); |
923 | + ulint i; |
924 | -+ |
925 | + |
926 | +- //ut_ad(buf_pool_mutex_own(buf_pool)); |
927 | +- ut_ad(mutex_own(&buf_pool->LRU_list_mutex)); |
928 | +- ut_ad(mutex_own(&buf_pool->zip_mutex)); |
929 | +- ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_PAGE); |
930 | + for (i = 0; i < srv_buf_pool_instances; i++) { |
931 | + buf_pool_t* buf_pool; |
932 | + buf_page_t* bpage; |
933 | -+ |
934 | ++ buf_chunk_t* chunk; |
935 | ++ ulint j, k; |
936 | + |
937 | +- /* Find the first successor of bpage in the LRU list |
938 | +- that is in the zip_clean list. */ |
939 | +- b = bpage; |
940 | +- do { |
941 | +- b = UT_LIST_GET_NEXT(LRU, b); |
942 | +- } while (b && (buf_page_get_state(b) != BUF_BLOCK_ZIP_PAGE || !b->in_LRU_list)); |
943 | + buf_pool = buf_pool_from_array(i); |
944 | -+ |
945 | + |
946 | +- /* Insert bpage before b, i.e., after the predecessor of b. */ |
947 | +- if (b) { |
948 | +- b = UT_LIST_GET_PREV(zip_list, b); |
949 | +- } |
950 | + mutex_enter(&buf_pool->LRU_list_mutex); |
951 | -+ |
952 | + |
953 | +- if (b) { |
954 | +- UT_LIST_INSERT_AFTER(zip_list, buf_pool->zip_clean, b, bpage); |
955 | +- } else { |
956 | +- UT_LIST_ADD_FIRST(zip_list, buf_pool->zip_clean, bpage); |
957 | + bpage = UT_LIST_GET_FIRST(buf_pool->LRU); |
958 | + |
959 | + while (bpage != NULL) { |
960 | @@ -82,13 +856,33 @@ |
961 | + } |
962 | + |
963 | + mutex_exit(&buf_pool->LRU_list_mutex); |
964 | -+ } |
965 | -+} |
966 | -+ |
967 | - /********************************************************************//** |
968 | - Insert a compressed block into buf_pool->zip_clean in the LRU order. */ |
969 | - UNIV_INTERN |
970 | -@@ -1558,6 +1589,10 @@ |
971 | ++ |
972 | ++ rw_lock_s_lock(&btr_search_latch); |
973 | ++ chunk = buf_pool->chunks; |
974 | ++ for (j = buf_pool->n_chunks; j--; chunk++) { |
975 | ++ buf_block_t* block = chunk->blocks; |
976 | ++ for (k = chunk->size; k--; block++) { |
977 | ++ if (buf_block_get_state(block) |
978 | ++ != BUF_BLOCK_FILE_PAGE |
979 | ++ || !block->is_hashed |
980 | ++ || buf_page_get_space(&block->page) != id) { |
981 | ++ continue; |
982 | ++ } |
983 | ++ |
984 | ++ rw_lock_s_unlock(&btr_search_latch); |
985 | ++ |
986 | ++ rw_lock_x_lock(&block->lock); |
987 | ++ btr_search_drop_page_hash_index(block, NULL); |
988 | ++ rw_lock_x_unlock(&block->lock); |
989 | ++ |
990 | ++ rw_lock_s_lock(&btr_search_latch); |
991 | ++ } |
992 | ++ } |
993 | ++ rw_lock_s_unlock(&btr_search_latch); |
994 | + } |
995 | + } |
996 | + |
997 | +@@ -1558,6 +1579,10 @@ |
998 | return(BUF_LRU_NOT_FREED); |
999 | } |
1000 | |
1001 | @@ -99,6 +893,23 @@ |
1002 | #ifdef UNIV_IBUF_COUNT_DEBUG |
1003 | ut_a(ibuf_count_get(bpage->space, bpage->offset) == 0); |
1004 | #endif /* UNIV_IBUF_COUNT_DEBUG */ |
1005 | +@@ -1733,7 +1758,6 @@ |
1006 | + |
1007 | + mutex_enter(&buf_pool->zip_mutex); |
1008 | + if (b->state == BUF_BLOCK_ZIP_PAGE) { |
1009 | +- buf_LRU_insert_zip_clean(b); |
1010 | + } else { |
1011 | + /* Relocate on buf_pool->flush_list. */ |
1012 | + buf_flush_relocate_on_flush_list(bpage, b); |
1013 | +@@ -2038,8 +2062,6 @@ |
1014 | + ut_a(bpage->zip.data); |
1015 | + ut_a(buf_page_get_zip_size(bpage)); |
1016 | + |
1017 | +- UT_LIST_REMOVE(zip_list, buf_pool->zip_clean, bpage); |
1018 | +- |
1019 | + mutex_exit(&buf_pool->zip_mutex); |
1020 | + //buf_pool_mutex_exit_forbid(buf_pool); |
1021 | + |
1022 | diff -ruN a/storage/innobase/fil/fil0fil.c b/storage/innobase/fil/fil0fil.c |
1023 | --- a/storage/innobase/fil/fil0fil.c 2011-02-23 19:00:48.223696428 +0900 |
1024 | +++ b/storage/innobase/fil/fil0fil.c 2011-02-23 19:01:19.147655510 +0900 |
1025 | @@ -336,7 +1147,20 @@ |
1026 | diff -ruN a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h |
1027 | --- a/storage/innobase/include/buf0buf.h 2011-02-23 19:00:48.252696774 +0900 |
1028 | +++ b/storage/innobase/include/buf0buf.h 2011-02-23 19:01:19.182655902 +0900 |
1029 | -@@ -1438,6 +1438,7 @@ |
1030 | +@@ -258,12 +258,6 @@ |
1031 | + BUF_BLOCK_ZIP_DIRTY or BUF_BLOCK_ZIP_PAGE */ |
1032 | + buf_page_t* dpage) /*!< in/out: destination control block */ |
1033 | + __attribute__((nonnull)); |
1034 | +-/********************************************************************//** |
1035 | +-Resizes the buffer pool. */ |
1036 | +-UNIV_INTERN |
1037 | +-void |
1038 | +-buf_pool_resize(void); |
1039 | +-/*=================*/ |
1040 | + /*********************************************************************//** |
1041 | + Gets the current size of buffer buf_pool in bytes. |
1042 | + @return size in bytes */ |
1043 | +@@ -1438,6 +1432,7 @@ |
1044 | 0 if the block was never accessed |
1045 | in the buffer pool */ |
1046 | /* @} */ |
1047 | @@ -344,6 +1168,15 @@ |
1048 | ibool is_corrupt; |
1049 | # if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG |
1050 | ibool file_page_was_freed; |
1051 | +@@ -1777,8 +1772,6 @@ |
1052 | + frames and buf_page_t descriptors of blocks that exist |
1053 | + in the buffer pool only in compressed form. */ |
1054 | + /* @{ */ |
1055 | +- UT_LIST_BASE_NODE_T(buf_page_t) zip_clean; |
1056 | +- /*!< unmodified compressed pages */ |
1057 | + UT_LIST_BASE_NODE_T(buf_page_t) zip_free[BUF_BUDDY_SIZES_MAX]; |
1058 | + /*!< buddy free lists */ |
1059 | + |
1060 | diff -ruN a/storage/innobase/include/buf0buf.ic b/storage/innobase/include/buf0buf.ic |
1061 | --- a/storage/innobase/include/buf0buf.ic 2011-02-23 19:00:48.130659154 +0900 |
1062 | +++ b/storage/innobase/include/buf0buf.ic 2011-02-23 19:01:19.185655906 +0900 |
1063 | @@ -358,20 +1191,25 @@ |
1064 | diff -ruN a/storage/innobase/include/buf0lru.h b/storage/innobase/include/buf0lru.h |
1065 | --- a/storage/innobase/include/buf0lru.h 2011-02-23 19:00:47.977658923 +0900 |
1066 | +++ b/storage/innobase/include/buf0lru.h 2011-02-23 19:01:19.188625768 +0900 |
1067 | -@@ -85,6 +85,13 @@ |
1068 | +@@ -85,13 +85,13 @@ |
1069 | buf_LRU_invalidate_tablespace( |
1070 | /*==========================*/ |
1071 | ulint id); /*!< in: space id */ |
1072 | +-/********************************************************************//** |
1073 | +-Insert a compressed block into buf_pool->zip_clean in the LRU order. */ |
1074 | +/******************************************************************//** |
1075 | +*/ |
1076 | -+UNIV_INTERN |
1077 | -+void |
1078 | + UNIV_INTERN |
1079 | + void |
1080 | +-buf_LRU_insert_zip_clean( |
1081 | +-/*=====================*/ |
1082 | +- buf_page_t* bpage); /*!< in: pointer to the block in question */ |
1083 | +buf_LRU_mark_space_was_deleted( |
1084 | +/*===========================*/ |
1085 | + ulint id); /*!< in: space id */ |
1086 | - /********************************************************************//** |
1087 | - Insert a compressed block into buf_pool->zip_clean in the LRU order. */ |
1088 | - UNIV_INTERN |
1089 | + |
1090 | + /******************************************************************//** |
1091 | + Try to free a block. If bpage is a descriptor of a compressed-only |
1092 | diff -ruN a/storage/innobase/include/os0file.h b/storage/innobase/include/os0file.h |
1093 | --- a/storage/innobase/include/os0file.h 2011-02-23 19:00:48.260696646 +0900 |
1094 | +++ b/storage/innobase/include/os0file.h 2011-02-23 19:01:19.190656054 +0900 |
1095 | @@ -638,6 +1476,56 @@ |
1096 | if (prebuilt->sql_stat_start) { |
1097 | node->state = INS_NODE_SET_IX_LOCK; |
1098 | prebuilt->sql_stat_start = FALSE; |
1099 | +@@ -2597,10 +2605,29 @@ |
1100 | + |
1101 | + err = DB_ERROR; |
1102 | + } else { |
1103 | ++ dict_index_t* index; |
1104 | ++ |
1105 | + /* Set the flag which tells that now it is legal to |
1106 | + IMPORT a tablespace for this table */ |
1107 | + table->tablespace_discarded = TRUE; |
1108 | + table->ibd_file_missing = TRUE; |
1109 | ++ |
1110 | ++ /* check adaptive hash entries */ |
1111 | ++ index = dict_table_get_first_index(table); |
1112 | ++ while (index) { |
1113 | ++ ulint ref_count = btr_search_info_get_ref_count(index->search_info, index->id); |
1114 | ++ if (ref_count) { |
1115 | ++ fprintf(stderr, "InnoDB: Warning:" |
1116 | ++ " hash index ref_count (%lu) is not zero" |
1117 | ++ " after fil_discard_tablespace().\n" |
1118 | ++ "index: \"%s\"" |
1119 | ++ " table: \"%s\"\n", |
1120 | ++ ref_count, |
1121 | ++ index->name, |
1122 | ++ table->name); |
1123 | ++ } |
1124 | ++ index = dict_table_get_next_index(index); |
1125 | ++ } |
1126 | + } |
1127 | + } |
1128 | + |
1129 | +@@ -2949,6 +2976,19 @@ |
1130 | + table->space = space; |
1131 | + index = dict_table_get_first_index(table); |
1132 | + do { |
1133 | ++ ulint ref_count = btr_search_info_get_ref_count(index->search_info, index->id); |
1134 | ++ /* check adaptive hash entries */ |
1135 | ++ if (ref_count) { |
1136 | ++ fprintf(stderr, "InnoDB: Warning:" |
1137 | ++ " hash index ref_count (%lu) is not zero" |
1138 | ++ " after fil_discard_tablespace().\n" |
1139 | ++ "index: \"%s\"" |
1140 | ++ " table: \"%s\"\n", |
1141 | ++ ref_count, |
1142 | ++ index->name, |
1143 | ++ table->name); |
1144 | ++ } |
1145 | ++ |
1146 | + index->space = space; |
1147 | + index = dict_table_get_next_index(index); |
1148 | + } while (index); |
1149 | diff -ruN a/storage/innobase/row/row0sel.c b/storage/innobase/row/row0sel.c |
1150 | --- a/storage/innobase/row/row0sel.c 2010-12-04 15:52:23.494514495 +0900 |
1151 | +++ b/storage/innobase/row/row0sel.c 2010-12-04 16:01:38.320883699 +0900 |
Stewart,
I don't understand why this bug fix, which is wanted by a customer in issue #17014,
has been blocked and not yet included in our next 5.5.13 release.