# Workarounds for http://bugs.mysql.com/56433 (always applied: good for all users, and safe),
# http://bugs.mysql.com/51325 (optional: innodb_lazy_drop_table),
# and http://bugs.mysql.com/61341 (always applied in non-debug builds)
# were added. They may be removed in the future once the bugs are fixed officially.
# Any small change to this file in the main branch
# should be made or reviewed by the maintainer!
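#
# For reference, one pattern recurs throughout the hunks below (per the note
# above, the workaround applied in non-debug builds): all maintenance of the
# buf_pool->zip_clean list is wrapped in debug-only preprocessor guards, so
# production builds skip the list entirely. A minimal sketch of the guard
# shape, using identifiers taken from the hunks below (an illustration, not
# a literal hunk):
#
#   #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
#   /* buf_pool->zip_clean is tracked only in debug builds */
#   mutex_enter(&flush_list_mutex);
#   UT_LIST_ADD_FIRST(zip_list, buf_pool->zip_clean, dpage);
#   mutex_exit(&flush_list_mutex);
#   #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
#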
diff -ruN a/storage/innodb_plugin/buf/buf0buddy.c b/storage/innodb_plugin/buf/buf0buddy.c
--- a/storage/innodb_plugin/buf/buf0buddy.c 2011-06-23 17:58:32.811788353 +0900
+++ b/storage/innodb_plugin/buf/buf0buddy.c 2011-06-23 17:59:22.291827133 +0900
buf_page_t* bpage, /*!< in: block to relocate */
buf_page_t* dpage) /*!< in: free block to relocate to */
+#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
+#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
//ut_ad(buf_pool_mutex_own());
#ifdef UNIV_SYNC_DEBUG
buf_relocate(bpage, dpage);
ut_d(bpage->state = BUF_BLOCK_ZIP_FREE);
+#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/* relocate buf_pool->zip_clean */
mutex_enter(&flush_list_mutex);
b = UT_LIST_GET_PREV(zip_list, dpage);
UT_LIST_ADD_FIRST(zip_list, buf_pool->zip_clean, dpage);
mutex_exit(&flush_list_mutex);
+#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
UNIV_MEM_INVALID(bpage, sizeof *bpage);
diff -ruN a/storage/innodb_plugin/buf/buf0buf.c b/storage/innodb_plugin/buf/buf0buf.c
--- a/storage/innodb_plugin/buf/buf0buf.c 2011-02-21 20:31:57.781983359 +0900
+++ b/storage/innodb_plugin/buf/buf0buf.c 2011-02-21 20:32:39.523946003 +0900
-/*********************************************************************//**
-Checks that all blocks in the buffer chunk are in BUF_BLOCK_NOT_USED state.
-@return TRUE if all freed */
- const buf_chunk_t* chunk) /*!< in: chunk being checked */
- const buf_block_t* block;
- ut_ad(buf_pool_mutex_own()); /* but we need all mutex here */
- block = chunk->blocks;
- for (i = chunk->size; i--; block++) {
- if (buf_block_get_state(block) != BUF_BLOCK_NOT_USED) {
-/********************************************************************//**
-Frees a chunk of buffer frames. */
- buf_chunk_t* chunk) /*!< out: chunk of buffers */
- const buf_block_t* block_end;
- ut_ad(buf_pool_mutex_own()); /* but we need all mutex here */
- block_end = chunk->blocks + chunk->size;
- for (block = chunk->blocks; block < block_end; block++) {
- ut_a(buf_block_get_state(block) == BUF_BLOCK_NOT_USED);
- ut_a(!block->page.zip.data);
- ut_ad(!block->page.in_LRU_list);
- ut_ad(!block->in_unzip_LRU_list);
- ut_ad(!block->page.in_flush_list);
- /* Remove the block from the free list. */
- mutex_enter(&free_list_mutex);
- ut_ad(block->page.in_free_list);
- UT_LIST_REMOVE(free, buf_pool->free, (&block->page));
- mutex_exit(&free_list_mutex);
- /* Free the latches. */
- mutex_free(&block->mutex);
- rw_lock_free(&block->lock);
-#ifdef UNIV_SYNC_DEBUG
- rw_lock_free(&block->debug_latch);
-#endif /* UNIV_SYNC_DEBUG */
- UNIV_MEM_UNDESC(block);
- os_mem_free_large(chunk->mem, chunk->mem_size);
/********************************************************************//**
Creates the buffer pool.
@return own: buf_pool object, NULL if not enough memory or error */
@@ -1106,8 +1038,6 @@
chunk = chunks + buf_pool->n_chunks;
while (--chunk >= chunks) {
- /* Bypass the checks of buf_chunk_free(), since they
- would fail at shutdown. */
os_mem_free_large(chunk->mem, chunk->mem_size);
@@ -1287,325 +1217,6 @@
/********************************************************************//**
-Shrinks the buffer pool. */
- ulint chunk_size) /*!< in: number of pages to remove */
- buf_chunk_t* chunks;
- buf_chunk_t* chunk;
- ulint max_free_size;
- buf_chunk_t* max_chunk;
- buf_chunk_t* max_free_chunk;
- ut_ad(!buf_pool_mutex_own());
- btr_search_disable(); /* Empty the adaptive hash index again */
- //buf_pool_mutex_enter();
- mutex_enter(&LRU_list_mutex);
- if (buf_pool->n_chunks <= 1) {
- /* Cannot shrink if there is only one chunk */
- /* Search for the largest free chunk
- not larger than the size difference */
- chunks = buf_pool->chunks;
- chunk = chunks + buf_pool->n_chunks;
- max_size = max_free_size = 0;
- max_chunk = max_free_chunk = NULL;
- while (--chunk >= chunks) {
- if (chunk->size <= chunk_size
- && chunk->size > max_free_size) {
- if (chunk->size > max_size) {
- max_size = chunk->size;
- if (buf_chunk_all_free(chunk)) {
- max_free_size = chunk->size;
- max_free_chunk = chunk;
- if (!max_free_size) {
- buf_block_t* block;
- /* Cannot shrink: try again later
- (do not assign srv_buf_pool_old_size) */
- block = max_chunk->blocks;
- bend = block + max_chunk->size;
- /* Move the blocks of chunk to the end of the
- LRU list and try to flush them. */
- for (; block < bend; block++) {
- switch (buf_block_get_state(block)) {
- case BUF_BLOCK_NOT_USED:
- case BUF_BLOCK_FILE_PAGE:
- mutex_enter(&block->mutex);
- /* The following calls will temporarily
- release block->mutex and buf_pool_mutex.
- Therefore, we have to always retry,
- even if !dirty && !nonfree. */
- if (!buf_flush_ready_for_replace(&block->page)) {
- buf_LRU_make_block_old(&block->page);
- } else if (buf_LRU_free_block(&block->page, TRUE, FALSE)
- != BUF_LRU_FREED) {
- mutex_exit(&block->mutex);
- //buf_pool_mutex_exit();
- mutex_exit(&LRU_list_mutex);
- /* Request for a flush of the chunk if it helps.
- Do not flush if there are non-free blocks, since
- flushing will not make the chunk freeable. */
- /* Avoid busy-waiting. */
- os_thread_sleep(100000);
- && buf_flush_batch(BUF_FLUSH_LRU, dirty, 0)
- == ULINT_UNDEFINED) {
- buf_flush_wait_batch_end(BUF_FLUSH_LRU);
- max_size = max_free_size;
- max_chunk = max_free_chunk;
- srv_buf_pool_old_size = srv_buf_pool_size;
- /* Rewrite buf_pool->chunks. Copy everything but max_chunk. */
- chunks = mem_alloc((buf_pool->n_chunks - 1) * sizeof *chunks);
- memcpy(chunks, buf_pool->chunks,
- (max_chunk - buf_pool->chunks) * sizeof *chunks);
- memcpy(chunks + (max_chunk - buf_pool->chunks),
- buf_pool->chunks + buf_pool->n_chunks
- - (max_chunk + 1));
- ut_a(buf_pool->curr_size > max_chunk->size);
- buf_pool->curr_size -= max_chunk->size;
- srv_buf_pool_curr_size = buf_pool->curr_size * UNIV_PAGE_SIZE;
- chunk_size -= max_chunk->size;
- buf_chunk_free(max_chunk);
- mem_free(buf_pool->chunks);
- buf_pool->chunks = chunks;
- buf_pool->n_chunks--;
- /* Allow a slack of one megabyte. */
- if (chunk_size > 1048576 / UNIV_PAGE_SIZE) {
- srv_buf_pool_old_size = srv_buf_pool_size;
- //buf_pool_mutex_exit();
- mutex_exit(&LRU_list_mutex);
- btr_search_enable();
-/********************************************************************//**
-Rebuild buf_pool->page_hash. */
-buf_pool_page_hash_rebuild(void)
-/*============================*/
- buf_chunk_t* chunk;
- hash_table_t* page_hash;
- hash_table_t* zip_hash;
- //buf_pool_mutex_enter();
- mutex_enter(&LRU_list_mutex);
- rw_lock_x_lock(&page_hash_latch);
- mutex_enter(&flush_list_mutex);
- /* Free, create, and populate the hash table. */
- hash_table_free(buf_pool->page_hash);
- buf_pool->page_hash = page_hash = hash_create(2 * buf_pool->curr_size);
- zip_hash = hash_create(2 * buf_pool->curr_size);
- HASH_MIGRATE(buf_pool->zip_hash, zip_hash, buf_page_t, hash,
- BUF_POOL_ZIP_FOLD_BPAGE);
- hash_table_free(buf_pool->zip_hash);
- buf_pool->zip_hash = zip_hash;
- /* Insert the uncompressed file pages to buf_pool->page_hash. */
- chunk = buf_pool->chunks;
- n_chunks = buf_pool->n_chunks;
- for (i = 0; i < n_chunks; i++, chunk++) {
- buf_block_t* block = chunk->blocks;
- for (j = 0; j < chunk->size; j++, block++) {
- if (buf_block_get_state(block)
- == BUF_BLOCK_FILE_PAGE) {
- ut_ad(!block->page.in_zip_hash);
- ut_ad(block->page.in_page_hash);
- HASH_INSERT(buf_page_t, hash, page_hash,
- buf_page_address_fold(
- block->page.offset),
- /* Insert the compressed-only pages to buf_pool->page_hash.
- All such blocks are either in buf_pool->zip_clean or
- in buf_pool->flush_list. */
- for (b = UT_LIST_GET_FIRST(buf_pool->zip_clean); b;
- b = UT_LIST_GET_NEXT(zip_list, b)) {
- ut_a(buf_page_get_state(b) == BUF_BLOCK_ZIP_PAGE);
- ut_ad(!b->in_flush_list);
- ut_ad(b->in_LRU_list);
- ut_ad(b->in_page_hash);
- ut_ad(!b->in_zip_hash);
- HASH_INSERT(buf_page_t, hash, page_hash,
- buf_page_address_fold(b->space, b->offset), b);
- for (b = UT_LIST_GET_FIRST(buf_pool->flush_list); b;
- b = UT_LIST_GET_NEXT(flush_list, b)) {
- ut_ad(b->in_flush_list);
- ut_ad(b->in_LRU_list);
- ut_ad(b->in_page_hash);
- ut_ad(!b->in_zip_hash);
- switch (buf_page_get_state(b)) {
- case BUF_BLOCK_ZIP_DIRTY:
- HASH_INSERT(buf_page_t, hash, page_hash,
- buf_page_address_fold(b->space,
- case BUF_BLOCK_FILE_PAGE:
- /* uncompressed page */
- case BUF_BLOCK_ZIP_FREE:
- case BUF_BLOCK_ZIP_PAGE:
- case BUF_BLOCK_NOT_USED:
- case BUF_BLOCK_READY_FOR_USE:
- case BUF_BLOCK_MEMORY:
- case BUF_BLOCK_REMOVE_HASH:
- //buf_pool_mutex_exit();
- mutex_exit(&LRU_list_mutex);
- rw_lock_x_unlock(&page_hash_latch);
- mutex_exit(&flush_list_mutex);
-/********************************************************************//**
-Resizes the buffer pool. */
-buf_pool_resize(void)
-/*=================*/
- //buf_pool_mutex_enter();
- mutex_enter(&LRU_list_mutex);
- if (srv_buf_pool_old_size == srv_buf_pool_size) {
- //buf_pool_mutex_exit();
- mutex_exit(&LRU_list_mutex);
- if (srv_buf_pool_curr_size + 1048576 > srv_buf_pool_size) {
- //buf_pool_mutex_exit();
- mutex_exit(&LRU_list_mutex);
- /* Disable adaptive hash indexes and empty the index
- in order to free up memory in the buffer pool chunks. */
- buf_pool_shrink((srv_buf_pool_curr_size - srv_buf_pool_size)
- } else if (srv_buf_pool_curr_size + 1048576 < srv_buf_pool_size) {
- /* Enlarge the buffer pool by at least one megabyte */
- = srv_buf_pool_size - srv_buf_pool_curr_size;
- buf_chunk_t* chunks;
- buf_chunk_t* chunk;
- chunks = mem_alloc((buf_pool->n_chunks + 1) * sizeof *chunks);
- memcpy(chunks, buf_pool->chunks, buf_pool->n_chunks
- chunk = &chunks[buf_pool->n_chunks];
- if (!buf_chunk_init(chunk, mem_size)) {
- buf_pool->curr_size += chunk->size;
- srv_buf_pool_curr_size = buf_pool->curr_size
- mem_free(buf_pool->chunks);
- buf_pool->chunks = chunks;
- buf_pool->n_chunks++;
- srv_buf_pool_old_size = srv_buf_pool_size;
- //buf_pool_mutex_exit();
- mutex_exit(&LRU_list_mutex);
- buf_pool_page_hash_rebuild();
-/********************************************************************//**
Moves a page to the start of the buffer pool LRU list. This high-level
function can be used to prevent an important page from slipping out of
@@ -2448,8 +2059,10 @@
if (buf_page_get_state(&block->page)
== BUF_BLOCK_ZIP_PAGE) {
+#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
UT_LIST_REMOVE(zip_list, buf_pool->zip_clean,
+#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
ut_ad(!block->page.in_flush_list);
/* Relocate buf_pool->flush_list. */
@@ -3243,6 +2856,7 @@
bpage->state = BUF_BLOCK_ZIP_PAGE;
bpage->space = space;
bpage->offset = offset;