~ubuntu-branches/ubuntu/precise/mysql-5.5/precise-201203300109


Viewing changes to storage/innobase/buf/buf0lru.c

  • Committer: Package Import Robot
  • Author(s): Clint Byrum
  • Date: 2012-02-14 23:59:22 UTC
  • mfrom: (1.1.2)
  • Revision ID: package-import@ubuntu.com-20120214235922-cux5uek1e5l0hje9
  • Tags: 5.5.20-0ubuntu1
* New upstream release.
* d/mysql-server-5.5.mysql.upstart: Fix the 'stop on' condition to make
  sure mysql is fully stopped before shutdown commences. (LP: #688541)
  Also simplify the 'start on' condition, as it was redundant.
* d/control: Depend on upstart version which has apparmor profile load
  script to prevent failure on upgrade from lucid to precise.
  (LP: #907465)
* d/apparmor-profile: allow access to /run, since that is the real
  path of files under /var/run. (LP: #917542)
* d/control: mysql-server-5.5 now contains files that used to be owned
  by libmysqlclient-dev, so it must declare Breaks/Replaces on that
  package. (LP: #912487)
* d/rules, d/control: 5.5.20 fixes a segfault in the tests with gcc 4.6;
  change the compiler back to the system default.
* d/rules: Turn off embedded libedit/readline. (Closes: #659566)

@@ -68,8 +68,12 @@
 
 /** When dropping the search hash index entries before deleting an ibd
 file, we build a local array of pages belonging to that tablespace
-in the buffer pool. Following is the size of that array. */
-#define BUF_LRU_DROP_SEARCH_HASH_SIZE   1024
+in the buffer pool. Following is the size of that array.
+We also release buf_pool->mutex after scanning this many pages of the
+flush_list when dropping a table. This is to ensure that other threads
+are not blocked for extended period of time when using very large
+buffer pools. */
+#define BUF_LRU_DROP_SEARCH_SIZE        1024
 
 /** If we switch on the InnoDB monitor because there are too few available
 frames in the buffer pool, we set this to TRUE */
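The renamed constant BUF_LRU_DROP_SEARCH_SIZE serves two purposes in the patch: it is the size of the local batch of page numbers and the interval at which the hot buf_pool->mutex is released during a long scan. The following is a minimal, self-contained sketch of that batch-and-release pattern; the list type, pool_mutex, and process_batch() are hypothetical stand-ins, not InnoDB code.

#include <pthread.h>
#include <stddef.h>

#define DROP_BATCH_SIZE 1024            /* plays the role of BUF_LRU_DROP_SEARCH_SIZE */

struct node {
        unsigned long   page_no;
        struct node*    prev;
};

static pthread_mutex_t  pool_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct node*     list_tail;      /* stand-in for the buffer pool list */

/* Stand-in for processing one full batch, e.g. dropping the hash
entries for every collected page number. */
static void
process_batch(const unsigned long* arr, size_t n)
{
        (void) arr;
        (void) n;
}

static void
scan_in_batches(void)
{
        unsigned long   arr[DROP_BATCH_SIZE];
        size_t          n = 0;
        struct node*    p;

        pthread_mutex_lock(&pool_mutex);

        for (p = list_tail; p != NULL; p = p->prev) {
                arr[n++] = p->page_no;

                if (n < DROP_BATCH_SIZE) {
                        continue;
                }

                /* The batch is full: drop the hot mutex so other
                threads are not blocked while the batch is processed.
                In real code the cursor p may become stale here and
                must be revalidated (or the scan restarted) after
                re-locking. */
                pthread_mutex_unlock(&pool_mutex);
                process_batch(arr, n);
                n = 0;
                pthread_mutex_lock(&pool_mutex);
        }

        pthread_mutex_unlock(&pool_mutex);

        if (n > 0) {
                process_batch(arr, n);
        }
}

int
main(void)
{
        scan_in_batches();              /* empty list: nothing to process */
        return 0;
}

The same shape appears twice in the patch: once when collecting page numbers for the adaptive hash index drop, and once (with i reset to 0) in the new flush_list scan.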
@@ -210,7 +214,7 @@
         ulint   i;
 
         ut_ad(arr != NULL);
-        ut_ad(count <= BUF_LRU_DROP_SEARCH_HASH_SIZE);
+        ut_ad(count <= BUF_LRU_DROP_SEARCH_SIZE);
 
         for (i = 0; i < count; ++i) {
                 btr_search_drop_page_hash_when_freed(space_id, zip_size,
@@ -244,7 +248,7 @@
         }
 
         page_arr = ut_malloc(
-                sizeof(ulint) * BUF_LRU_DROP_SEARCH_HASH_SIZE);
+                sizeof(ulint) * BUF_LRU_DROP_SEARCH_SIZE);
 
         buf_pool_mutex_enter(buf_pool);
         num_entries = 0;
@@ -273,7 +277,7 @@
 
                 mutex_enter(&((buf_block_t*) bpage)->mutex);
                 is_fixed = bpage->buf_fix_count > 0
-                        || !((buf_block_t*) bpage)->is_hashed;
+                        || !((buf_block_t*) bpage)->index;
                 mutex_exit(&((buf_block_t*) bpage)->mutex);
 
                 if (is_fixed) {
@@ -283,10 +287,10 @@
                 /* Store the page number so that we can drop the hash
                 index in a batch later. */
                 page_arr[num_entries] = bpage->offset;
-                ut_a(num_entries < BUF_LRU_DROP_SEARCH_HASH_SIZE);
+                ut_a(num_entries < BUF_LRU_DROP_SEARCH_SIZE);
                 ++num_entries;
 
-                if (num_entries < BUF_LRU_DROP_SEARCH_HASH_SIZE) {
+                if (num_entries < BUF_LRU_DROP_SEARCH_SIZE) {
                         goto next_page;
                 }
 
@@ -331,37 +335,40 @@
 }
 
 /******************************************************************//**
-Invalidates all pages belonging to a given tablespace inside a specific
+Remove all dirty pages belonging to a given tablespace inside a specific
 buffer pool instance when we are deleting the data file(s) of that
-tablespace. */
+tablespace. The pages still remain a part of LRU and are evicted from
+the list as they age towards the tail of the LRU. */
 static
 void
-buf_LRU_invalidate_tablespace_buf_pool_instance(
-/*============================================*/
+buf_LRU_remove_dirty_pages_for_tablespace(
+/*======================================*/
         buf_pool_t*     buf_pool,       /*!< buffer pool instance */
         ulint           id)             /*!< in: space id */
 {
         buf_page_t*     bpage;
         ibool           all_freed;
+        ulint           i;
 
 scan_again:
         buf_pool_mutex_enter(buf_pool);
+        buf_flush_list_mutex_enter(buf_pool);
 
         all_freed = TRUE;
 
-        bpage = UT_LIST_GET_LAST(buf_pool->LRU);
+        for (bpage = UT_LIST_GET_LAST(buf_pool->flush_list), i = 0;
+             bpage != NULL; ++i) {
 
-        while (bpage != NULL) {
                 buf_page_t*     prev_bpage;
                 mutex_t*        block_mutex = NULL;
 
                 ut_a(buf_page_in_file(bpage));
 
-                prev_bpage = UT_LIST_GET_PREV(LRU, bpage);
+                prev_bpage = UT_LIST_GET_PREV(list, bpage);
 
                 /* bpage->space and bpage->io_fix are protected by
-                buf_pool_mutex and block_mutex.  It is safe to check
-                them while holding buf_pool_mutex only. */
+                buf_pool->mutex and block_mutex. It is safe to check
+                them while holding buf_pool->mutex only. */
 
                 if (buf_page_get_space(bpage) != id) {
                         /* Skip this block, as it does not belong to
@@ -374,79 +381,83 @@
 
                         all_freed = FALSE;
                         goto next_page;
-                } else {
-                        block_mutex = buf_page_get_mutex(bpage);
-                        mutex_enter(block_mutex);
-
-                        if (bpage->buf_fix_count > 0) {
-
-                                mutex_exit(block_mutex);
-                                /* We cannot remove this page during
-                                this scan yet; maybe the system is
-                                currently reading it in, or flushing
-                                the modifications to the file */
-
-                                all_freed = FALSE;
-
-                                goto next_page;
-                        }
-                }
-
-                ut_ad(mutex_own(block_mutex));
-
-#ifdef UNIV_DEBUG
-                if (buf_debug_prints) {
-                        fprintf(stderr,
-                                "Dropping space %lu page %lu\n",
-                                (ulong) buf_page_get_space(bpage),
-                                (ulong) buf_page_get_page_no(bpage));
-                }
-#endif
-                if (buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE) {
-                        /* This is a compressed-only block
-                        descriptor. Do nothing. */
-                } else if (((buf_block_t*) bpage)->is_hashed) {
-                        ulint   page_no;
-                        ulint   zip_size;
-
-                        buf_pool_mutex_exit(buf_pool);
-
-                        zip_size = buf_page_get_zip_size(bpage);
-                        page_no = buf_page_get_page_no(bpage);
-
-                        mutex_exit(block_mutex);
-
-                        /* Note that the following call will acquire
-                        an S-latch on the page */
-
-                        btr_search_drop_page_hash_when_freed(
-                                id, zip_size, page_no);
-                        goto scan_again;
-                }
-
-                if (bpage->oldest_modification != 0) {
-
-                        buf_flush_remove(bpage);
-                }
-
-                /* Remove from the LRU list. */
-
-                if (buf_LRU_block_remove_hashed_page(bpage, TRUE)
-                    != BUF_BLOCK_ZIP_FREE) {
-                        buf_LRU_block_free_hashed_page((buf_block_t*) bpage);
-                        mutex_exit(block_mutex);
-                } else {
-                        /* The block_mutex should have been released
-                        by buf_LRU_block_remove_hashed_page() when it
-                        returns BUF_BLOCK_ZIP_FREE. */
-                        ut_ad(block_mutex == &buf_pool->zip_mutex);
-                        ut_ad(!mutex_own(block_mutex));
-                }
+                }
+
+                /* We have to release the flush_list_mutex to obey the
+                latching order. We are however guaranteed that the page
+                will stay in the flush_list because buf_flush_remove()
+                needs buf_pool->mutex as well. */
+                buf_flush_list_mutex_exit(buf_pool);
+                block_mutex = buf_page_get_mutex(bpage);
+                mutex_enter(block_mutex);
+
+                if (bpage->buf_fix_count > 0) {
+                        mutex_exit(block_mutex);
+                        buf_flush_list_mutex_enter(buf_pool);
+
+                        /* We cannot remove this page during
+                        this scan yet; maybe the system is
+                        currently reading it in, or flushing
+                        the modifications to the file */
+
+                        all_freed = FALSE;
+                        goto next_page;
+                }
+
+                ut_ad(bpage->oldest_modification != 0);
+
+                buf_flush_remove(bpage);
+
+                mutex_exit(block_mutex);
+                buf_flush_list_mutex_enter(buf_pool);
 next_page:
                 bpage = prev_bpage;
+
+                if (!bpage) {
+                        break;
+                }
+
+                /* Every BUF_LRU_DROP_SEARCH_SIZE iterations in the
+                loop we release buf_pool->mutex to let other threads
+                do their job. */
+                if (i < BUF_LRU_DROP_SEARCH_SIZE) {
+                        continue;
+                }
+
+                /* We IO-fix the block to make sure that the block
+                stays in its position in the flush_list. */
+                if (buf_page_get_io_fix(bpage) != BUF_IO_NONE) {
+                        /* Block is already IO-fixed. We don't
+                        want to change the value. Lets leave
+                        this block alone. */
+                        continue;
+                }
+
+                buf_flush_list_mutex_exit(buf_pool);
+                block_mutex = buf_page_get_mutex(bpage);
+                mutex_enter(block_mutex);
+                buf_page_set_sticky(bpage);
+                mutex_exit(block_mutex);
+
+                /* Now it is safe to release the buf_pool->mutex. */
+                buf_pool_mutex_exit(buf_pool);
+                os_thread_yield();
+                buf_pool_mutex_enter(buf_pool);
+
+                mutex_enter(block_mutex);
+                buf_page_unset_sticky(bpage);
+                mutex_exit(block_mutex);
+
+                buf_flush_list_mutex_enter(buf_pool);
+                ut_ad(bpage->in_flush_list);
+
+                i = 0;
         }
 
         buf_pool_mutex_exit(buf_pool);
+        buf_flush_list_mutex_exit(buf_pool);
+
+        ut_ad(buf_flush_validate(buf_pool));
 
         if (!all_freed) {
                 os_thread_sleep(20000);
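The rewritten scan avoids starving other threads: every BUF_LRU_DROP_SEARCH_SIZE pages it pins the current block with buf_page_set_sticky(), releases buf_pool->mutex, yields, and then re-acquires the mutex and unpins the block. The pin is what guarantees the page keeps its position in the flush_list while the mutex is not held. Below is a minimal sketch of that pin/yield/unpin shape using POSIX primitives; struct page, its pinned flag, and the mutex names are placeholders, not the InnoDB types.

#include <pthread.h>
#include <sched.h>

/* Hypothetical page descriptor; 'pinned' stands in for the sticky
(IO-fix) state that keeps the block in place while the pool mutex is
temporarily released. */
struct page {
        pthread_mutex_t mutex;          /* per-block mutex */
        int             pinned;
};

static pthread_mutex_t  pool_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Caller holds pool_mutex.  Let other threads run without losing our
position in the list: pin the page, drop the hot mutex, yield, then
restore the previous state. */
static void
yield_while_scanning(struct page* pg)
{
        pthread_mutex_lock(&pg->mutex);
        pg->pinned = 1;                         /* ~ buf_page_set_sticky()   */
        pthread_mutex_unlock(&pg->mutex);

        pthread_mutex_unlock(&pool_mutex);      /* ~ buf_pool_mutex_exit()   */
        sched_yield();                          /* ~ os_thread_yield()       */
        pthread_mutex_lock(&pool_mutex);        /* ~ buf_pool_mutex_enter()  */

        pthread_mutex_lock(&pg->mutex);
        pg->pinned = 0;                         /* ~ buf_page_unset_sticky() */
        pthread_mutex_unlock(&pg->mutex);
}

In the patch this extra locking happens only once the iteration counter i reaches BUF_LRU_DROP_SEARCH_SIZE, and only when the block is not already IO-fixed, so it is paid at most once per 1024 pages scanned.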
@@ -477,7 +488,7 @@
 
                 buf_pool = buf_pool_from_array(i);
                 buf_LRU_drop_page_hash_for_tablespace(buf_pool, id);
-                buf_LRU_invalidate_tablespace_buf_pool_instance(buf_pool, id);
+                buf_LRU_remove_dirty_pages_for_tablespace(buf_pool, id);
         }
 }
 
@@ -1532,8 +1543,9 @@
                         /* Prevent buf_page_get_gen() from
                         decompressing the block while we release
                         buf_pool->mutex and block_mutex. */
-                        b->buf_fix_count++;
-                        b->io_fix = BUF_IO_READ;
+                        mutex_enter(&buf_pool->zip_mutex);
+                        buf_page_set_sticky(b);
+                        mutex_exit(&buf_pool->zip_mutex);
                 }
 
                 buf_pool_mutex_exit(buf_pool);
@@ -1573,7 +1585,6 @@
 
                 if (b) {
                         mutex_enter(&buf_pool->zip_mutex);
-                        b->buf_fix_count--;
-                        buf_page_set_io_fix(b, BUF_IO_NONE);
+                        buf_page_unset_sticky(b);
                         mutex_exit(&buf_pool->zip_mutex);
                 }
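The last two hunks replace direct manipulation of b->buf_fix_count and b->io_fix with the paired buf_page_set_sticky()/buf_page_unset_sticky() calls taken under buf_pool->zip_mutex. As a rough illustration only (the field names and semantics below are hypothetical, not the InnoDB implementation), such a pair encapsulates a pin/unpin state change that previously had to be open-coded at every call site:

/* Hypothetical block descriptor; fix_count and io mirror the kind of
fields the old code modified directly. */
enum io_state { IO_NONE, IO_PIN };

struct block {
        unsigned        fix_count;
        enum io_state   io;
};

/* Pin the block so it can neither be evicted nor relocated.  The
caller is assumed to hold the mutex protecting these fields. */
static void
block_set_sticky(struct block* b)
{
        b->fix_count++;
        b->io = IO_PIN;
}

/* Undo block_set_sticky(); the same mutex must be held. */
static void
block_unset_sticky(struct block* b)
{
        b->fix_count--;
        b->io = IO_NONE;
}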