 buf_flush_insert_into_flush_list(
 /*=============================*/
-	buf_block_t*	block)	/*!< in/out: block which is modified */
+	buf_pool_t*	buf_pool,	/*!< buffer pool instance */
+	buf_block_t*	block,		/*!< in/out: block which is modified */
+	ib_uint64_t	lsn)		/*!< in: oldest modification */
-	ut_ad(buf_pool_mutex_own());
+	ut_ad(!buf_pool_mutex_own(buf_pool));
+	ut_ad(log_flush_order_mutex_own());
+	ut_ad(mutex_own(&block->mutex));
+
+	buf_flush_list_mutex_enter(buf_pool);
 	ut_ad((UT_LIST_GET_FIRST(buf_pool->flush_list) == NULL)
 	      || (UT_LIST_GET_FIRST(buf_pool->flush_list)->oldest_modification
 		  <= block->page.oldest_modification));

 	/* If we are in recovery then we need to update the flush
 	red-black tree as well. */
 	if (UNIV_LIKELY_NULL(buf_pool->flush_rbt)) {
-		buf_flush_insert_sorted_into_flush_list(block);
+		buf_flush_list_mutex_exit(buf_pool);
+		buf_flush_insert_sorted_into_flush_list(buf_pool, block, lsn);
+		return;
+	}

 	ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
-	ut_ad(block->page.in_LRU_list);
-	ut_ad(block->page.in_page_hash);
-	ut_ad(!block->page.in_zip_hash);
 	ut_ad(!block->page.in_flush_list);

 	ut_d(block->page.in_flush_list = TRUE);
+	block->page.oldest_modification = lsn;
 	UT_LIST_ADD_FIRST(list, buf_pool->flush_list, &block->page);

+#ifdef UNIV_DEBUG_VALGRIND
+	{
+		ulint	zip_size = buf_block_get_zip_size(block);
+
+		if (UNIV_UNLIKELY(zip_size)) {
+			UNIV_MEM_ASSERT_RW(block->page.zip.data, zip_size);
+		} else {
+			UNIV_MEM_ASSERT_RW(block->frame, UNIV_PAGE_SIZE);
+		}
+	}
+#endif /* UNIV_DEBUG_VALGRIND */

 #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
-	ut_a(buf_flush_validate_low());
+	ut_a(buf_flush_validate_low(buf_pool));
 #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
+
+	buf_flush_list_mutex_exit(buf_pool);
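
The assertions above encode the new protocol: the caller holds the block mutex and the log-flush-order mutex, does not hold the buffer pool mutex, and the list structure itself is covered by the new per-instance flush-list mutex. A minimal caller-side sketch of that ordering follows; it is an illustration, not code from this patch, and it assumes log_flush_order_mutex_enter()/log_flush_order_mutex_exit() as the acquire/release counterparts of the log_flush_order_mutex_own() asserted above.

/* Sketch only: the lock ordering a caller (e.g. a mini-transaction
commit) would use before calling buf_flush_insert_into_flush_list().
log_flush_order_mutex_enter()/_exit() are assumed names; this is not
code from the patch. */
static void
flush_list_insert_sketch(
	buf_pool_t*	buf_pool,
	buf_block_t*	block,
	ib_uint64_t	lsn)	/* oldest modification of the block */
{
	mutex_enter(&block->mutex);	/* pins the block state */
	log_flush_order_mutex_enter();	/* serializes insertion order */

	/* Note: the buffer pool mutex is NOT taken; the function
	asserts !buf_pool_mutex_own() and takes the per-instance
	flush-list mutex internally. */
	buf_flush_insert_into_flush_list(buf_pool, block, lsn);

	log_flush_order_mutex_exit();
	mutex_exit(&block->mutex);
}
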
 /********************************************************************//**
 buf_flush_insert_sorted_into_flush_list(
 /*====================================*/
-	buf_block_t*	block)	/*!< in/out: block which is modified */
+	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
+	buf_block_t*	block,		/*!< in/out: block which is modified */
+	ib_uint64_t	lsn)		/*!< in: oldest modification */
 	buf_page_t*	prev_b;

-	ut_ad(buf_pool_mutex_own());
+	ut_ad(!buf_pool_mutex_own(buf_pool));
+	ut_ad(log_flush_order_mutex_own());
+	ut_ad(mutex_own(&block->mutex));
 	ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);

+	buf_flush_list_mutex_enter(buf_pool);
+
+	/* The field in_LRU_list is protected by buf_pool_mutex, which
+	we are not holding.  However, while a block is in the flush
+	list, it is dirty and cannot be discarded from either the
+	page_hash or the LRU list.  At most, the uncompressed
+	page frame of a compressed block may be discarded or created
+	(copying the block->page to or from a buf_page_t that is
+	dynamically allocated from buf_buddy_alloc()).  Because those
+	transitions hold block->mutex and the flush list mutex (via
+	buf_flush_relocate_on_flush_list()), there is no possibility
+	of a race condition in the assertions below. */
 	ut_ad(block->page.in_LRU_list);
 	ut_ad(block->page.in_page_hash);
+	/* buf_buddy_block_register() will take a block in the
+	BUF_BLOCK_MEMORY state, not a file page. */
 	ut_ad(!block->page.in_zip_hash);
 	ut_ad(!block->page.in_flush_list);
 	ut_d(block->page.in_flush_list = TRUE);
+	block->page.oldest_modification = lsn;

+#ifdef UNIV_DEBUG_VALGRIND
+	{
+		ulint	zip_size = buf_block_get_zip_size(block);
+
+		if (UNIV_UNLIKELY(zip_size)) {
+			UNIV_MEM_ASSERT_RW(block->page.zip.data, zip_size);
+		} else {
+			UNIV_MEM_ASSERT_RW(block->frame, UNIV_PAGE_SIZE);
+		}
+	}
+#endif /* UNIV_DEBUG_VALGRIND */
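
For context on the recovery branch handled by this function: during normal operation every new oldest_modification is the largest LSN seen so far, so inserting at the list head keeps the flush list sorted by construction. During recovery, redo records are applied page by page, so pages become dirty in an order unrelated to their oldest LSN, and each insert must search for its position; the flush_rbt exists to make that search logarithmic. A small illustration with invented numbers:

/* Illustration only (invented LSNs): pages dirtied during recovery
in this order
	page A: oldest_modification = 100
	page B: oldest_modification =  90
	page C: oldest_modification =  95
must still yield a flush list sorted by descending LSN,
	A(100) -> C(95) -> B(90),
so inserting C means finding its slot between A and B.  A linear
scan of the flush list costs O(n) per insert; the red-black tree
keyed on oldest_modification finds the predecessor in O(log n). */
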
 /*====================*/
 	ulint		space,		/*!< in: space id */
 	ulint		offset,		/*!< in: page offset */
-	enum buf_flush	flush_type)	/*!< in: BUF_FLUSH_LRU or
+	enum buf_flush	flush_type,	/*!< in: BUF_FLUSH_LRU or
 					BUF_FLUSH_LIST */
+	ulint		n_flushed,	/*!< in: number of pages
+					flushed so far in this batch */
+	ulint		n_to_flush)	/*!< in: maximum number of pages
+					we are allowed to flush */
+	buf_pool_t*	buf_pool = buf_pool_get(space, offset);

 	ut_ad(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST);

 	if (UT_LIST_GET_LEN(buf_pool->LRU) < BUF_LRU_OLD_MIN_LEN) {
-		/* If there is little space, it is better not to flush any
-		block except from the end of the LRU list */
+		/* If there is little space, it is better not to flush
+		any block except from the end of the LRU list */
 		high = offset + 1;
-		/* When flushed, dirty blocks are searched in neighborhoods of
-		this size, and flushed along with the original page. */
+		/* When flushed, dirty blocks are searched in
+		neighborhoods of this size, and flushed along with the
+		original page. */
-		ulint	buf_flush_area	= ut_min(BUF_READ_AHEAD_AREA,
-						 buf_pool->curr_size / 16);
+		ulint	buf_flush_area;
+
+		buf_flush_area	= ut_min(
+			BUF_READ_AHEAD_AREA(buf_pool),
+			buf_pool->curr_size / 16);

 		low = (offset / buf_flush_area) * buf_flush_area;
 		high = (offset / buf_flush_area + 1) * buf_flush_area;

 		if (buf_flush_ready_for_flush(bpage, flush_type)
 		    && (i == offset || !bpage->buf_fix_count)) {
 			/* We only try to flush those
-			neighbors != offset where the buf fix count is
-			zero, as we then know that we probably can
-			latch the page without a semaphore wait.
-			Semaphore waits are expensive because we must
-			flush the doublewrite buffer before we start
+			neighbors != offset where the buf fix
+			count is zero, as we then know that we
+			probably can latch the page without a
+			semaphore wait.  Semaphore waits are
+			expensive because we must flush the
+			doublewrite buffer before we start

-			buf_flush_page(bpage, flush_type);
+			buf_flush_page(buf_pool, bpage, flush_type);
 			ut_ad(!mutex_own(block_mutex));
+			ut_ad(!buf_pool_mutex_own(buf_pool));
-			buf_pool_mutex_enter();
 			mutex_exit(block_mutex);

-	buf_pool_mutex_exit();
+	buf_pool_mutex_exit(buf_pool);
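
The neighborhood bounds above are plain alignment arithmetic. A worked example with invented numbers, assuming BUF_READ_AHEAD_AREA(buf_pool) = 64 pages and buf_pool->curr_size = 8192 pages; the helper name is hypothetical:

/* Worked example (invented numbers, not from the patch): */
static void
flush_area_example(void)
{
	ulint	buf_flush_area = ut_min(64, 8192 / 16);	/* = 64 */
	ulint	offset = 1000;
	ulint	low  = (offset / buf_flush_area) * buf_flush_area;
	ulint	high = (offset / buf_flush_area + 1) * buf_flush_area;

	ut_a(low == 960);
	ut_a(high == 1024);	/* pages [960, 1024) around page 1000
				are candidates, each still subject to
				buf_flush_ready_for_flush() and the
				buf_fix_count == 0 test above. */
}
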
+/********************************************************************//**
+Check if the block is modified and ready for flushing. If the block
+is ready to flush then flush the page and try to flush its neighbors.
+@return TRUE if buf_pool mutex was released during this function.
+This does not guarantee that some pages were actually written.
+The number of pages written is added to the count. */
+buf_flush_page_and_try_neighbors(
+/*=============================*/
+	buf_page_t*	bpage,		/*!< in: buffer control block,
+					buf_page_in_file(bpage) */
+	enum buf_flush	flush_type,	/*!< in: BUF_FLUSH_LRU
+					or BUF_FLUSH_LIST */
+	ulint		n_to_flush,	/*!< in: number of pages to
+					flush */
+	ulint*		count)		/*!< in/out: number of pages
+					flushed */
+	mutex_t*	block_mutex;
+	ibool		flushed = FALSE;
+#ifdef UNIV_DEBUG
+	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);
+#endif /* UNIV_DEBUG */
+
+	ut_ad(buf_pool_mutex_own(buf_pool));
+
+	block_mutex = buf_page_get_mutex(bpage);
+	mutex_enter(block_mutex);
+
+	ut_a(buf_page_in_file(bpage));
+
+	if (buf_flush_ready_for_flush(bpage, flush_type)) {
+		ulint		space;
+		ulint		offset;
+		buf_pool_t*	buf_pool;
+
+		buf_pool = buf_pool_from_bpage(bpage);
+
+		buf_pool_mutex_exit(buf_pool);
+
+		/* These fields are protected by both the
+		buffer pool mutex and block mutex. */
+		space = buf_page_get_space(bpage);
+		offset = buf_page_get_page_no(bpage);
+
+		mutex_exit(block_mutex);
+
+		/* Try to flush also all the neighbors */
+		*count += buf_flush_try_neighbors(space,
+						  offset,
+						  flush_type,
+						  *count,
+						  n_to_flush);
+
+		buf_pool_mutex_enter(buf_pool);
+		flushed = TRUE;
+	} else {
+		mutex_exit(block_mutex);
+	}
+
+	ut_ad(buf_pool_mutex_own(buf_pool));
+
+	return(flushed);
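
The return contract matters to every caller: TRUE means a page was flushed and the buffer pool mutex was dropped and re-taken, so any list pointer saved across the call may be stale; FALSE means the mutex was held throughout. A hedged sketch of the resulting caller pattern (essentially what buf_flush_LRU_list_batch() below does):

/* Caller-pattern sketch implied by the return value; illustration,
not patch code. */
static void
scan_and_flush_sketch(buf_pool_t* buf_pool, ulint max)
{
	buf_page_t*	bpage;
	ulint		count = 0;

	do {
		bpage = UT_LIST_GET_LAST(buf_pool->LRU);

		while (bpage != NULL
		       && !buf_flush_page_and_try_neighbors(
				bpage, BUF_FLUSH_LRU, max, &count)) {
			/* FALSE: the buf_pool mutex was never
			released, so this pointer is still valid. */
			bpage = UT_LIST_GET_PREV(LRU, bpage);
		}
		/* TRUE (or list exhausted): saved pointers may be
		stale; restart from the tail. */
	} while (bpage != NULL && count < max);
}
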
+/*******************************************************************//**
+This utility flushes dirty blocks from the end of the LRU list.
+In the case of an LRU flush the calling thread may own latches to
+pages: to avoid deadlocks, this function must be written so that it
+cannot end up waiting for these latches!
+@return number of blocks for which the write request was queued. */
+buf_flush_LRU_list_batch(
+/*=====================*/
+	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
+	ulint		max)		/*!< in: max of blocks to flush */
+	ut_ad(buf_pool_mutex_own(buf_pool));
+
+	do {
+		/* Start from the end of the list looking for a
+		suitable block to be flushed. */
+		bpage = UT_LIST_GET_LAST(buf_pool->LRU);
+
+		/* Iterate backwards over the LRU list until we find
+		a page that is ready for flushing (it is then flushed
+		along with its neighbors). */
+		while (bpage != NULL
+		       && !buf_flush_page_and_try_neighbors(
+				bpage, BUF_FLUSH_LRU, max, &count)) {
+			bpage = UT_LIST_GET_PREV(LRU, bpage);
+		}
+	} while (bpage != NULL && count < max);
+
+	/* We keep track of all flushes happening as part of LRU
+	flush. When estimating the desired rate at which flush_list
+	should be flushed, we factor in this value. */
+	buf_lru_flush_page_count += count;
+
+	ut_ad(buf_pool_mutex_own(buf_pool));
+/*******************************************************************//**
+This utility flushes dirty blocks from the end of the flush_list.
+The calling thread is not allowed to own any latches on pages!
+@return number of blocks for which the write request was queued;
+ULINT_UNDEFINED if there was a flush of the same type already running */
+buf_flush_flush_list_batch(
+/*=======================*/
+	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
+	ulint		min_n,		/*!< in: wished minimum number
+					of blocks flushed (it is not
+					guaranteed that the actual
+					number is that big, though) */
+	ib_uint64_t	lsn_limit)	/*!< all blocks whose
+					oldest_modification is smaller
+					than this should be flushed (if
+					their number does not exceed
+					min_n), otherwise ignored */
+	ut_ad(buf_pool_mutex_own(buf_pool));
+
+	do {
+		/* If we have flushed enough, leave the loop */
+		/* Start from the end of the list looking for a suitable
+		block to be flushed. */
+		buf_flush_list_mutex_enter(buf_pool);
+
+		/* We use len here because theoretically insertions can
+		happen in the flush_list below while we are traversing
+		it for a suitable candidate for flushing. We'd like to
+		set a limit on how far we are willing to traverse
+		the list. */
+		len = UT_LIST_GET_LEN(buf_pool->flush_list);
+		bpage = UT_LIST_GET_LAST(buf_pool->flush_list);
+
+		if (bpage) {
+			ut_a(bpage->oldest_modification > 0);
+		}
+
+		if (!bpage || bpage->oldest_modification >= lsn_limit) {
+			/* We have flushed enough */
+			buf_flush_list_mutex_exit(buf_pool);
+			break;
+		}
+
+		ut_a(bpage->oldest_modification > 0);
+		ut_ad(bpage->in_flush_list);
+
+		buf_flush_list_mutex_exit(buf_pool);
+
+		/* The list may change during the flushing and we cannot
+		safely preserve within this function a pointer to a
+		block in the list! */
+		while (bpage != NULL
+		       && !buf_flush_page_and_try_neighbors(
+				bpage, BUF_FLUSH_LIST, min_n, &count)) {
+
+			buf_flush_list_mutex_enter(buf_pool);
+
+			/* If we are here that means that buf_pool->mutex
+			was not released in buf_flush_page_and_try_neighbors()
+			above and this guarantees that bpage didn't get
+			relocated since we released the flush_list
+			mutex above. There is a chance, however, that
+			the bpage got removed from flush_list (not
+			currently possible because flush_list_remove()
+			also obtains buf_pool mutex but that may change
+			in the future). To avoid this scenario we check
+			the oldest_modification and if it is zero
+			we start all over again. */
+			if (bpage->oldest_modification == 0) {
+				buf_flush_list_mutex_exit(buf_pool);
+				break;
+			}
+
+			bpage = UT_LIST_GET_PREV(list, bpage);
+
+			ut_ad(!bpage || bpage->in_flush_list);
+
+			buf_flush_list_mutex_exit(buf_pool);
+			--len;
+		}
+	} while (count < min_n && bpage != NULL && len > 0);
+
+	ut_ad(buf_pool_mutex_own(buf_pool));
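
The oldest_modification == 0 test above works because removal from the flush list zeroes that field under the flush-list mutex. A simplified sketch of the removal side follows; buf_flush_remove() is the real function in buf0flu.c, but this body is a reduction for illustration, not the patch's code:

/* Simplified sketch of flush-list removal; not the patch's code. */
static void
flush_list_remove_sketch(buf_pool_t* buf_pool, buf_page_t* bpage)
{
	buf_flush_list_mutex_enter(buf_pool);

	ut_ad(bpage->in_flush_list);
	ut_d(bpage->in_flush_list = FALSE);
	UT_LIST_REMOVE(list, buf_pool->flush_list, bpage);

	/* This zero is exactly what the traversal above tests for
	after re-acquiring the flush-list mutex: a stale bpage that
	left the list reads as oldest_modification == 0. */
	bpage->oldest_modification = 0;

	buf_flush_list_mutex_exit(buf_pool);
}
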
 	ulint		min_n,		/*!< in: wished minimum number of blocks
 					flushed (it is not guaranteed that the
 					actual number is that big, though) */
-	ib_uint64_t	lsn_limit)	/*!< in the case BUF_FLUSH_LIST all
-					blocks whose oldest_modification is
+	ib_uint64_t	lsn_limit)	/*!< in: in the case of BUF_FLUSH_LIST
+					all blocks whose oldest_modification is
 					smaller than this should be flushed
 					(if their number does not exceed
 					min_n), otherwise ignored */
-	ulint		page_count	= 0;
-	ulint		old_page_count;
-
-	ut_ad((flush_type == BUF_FLUSH_LRU)
-	      || (flush_type == BUF_FLUSH_LIST));
+	ut_ad(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST);
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad((flush_type != BUF_FLUSH_LIST)
 	      || sync_thread_levels_empty_gen(TRUE));
 #endif /* UNIV_SYNC_DEBUG */
-	buf_pool_mutex_enter();
-
-	if ((buf_pool->n_flush[flush_type] > 0)
-	    || (buf_pool->init_flush[flush_type] == TRUE)) {
-
-		/* There is already a flush batch of the same type running */
-		buf_pool_mutex_exit();
-
-		return(ULINT_UNDEFINED);
-	}
-
-	buf_pool->init_flush[flush_type] = TRUE;
-
-	bool	done_with_loop = false;
-	for (; done_with_loop != true;) {
-		/* If we have flushed enough, leave the loop */
-		if (page_count >= min_n) {
-
-		/* Start from the end of the list looking for a suitable
-		block to be flushed. */
-		if (flush_type == BUF_FLUSH_LRU) {
-			bpage = UT_LIST_GET_LAST(buf_pool->LRU);
-			ut_ad(flush_type == BUF_FLUSH_LIST);
-			bpage = UT_LIST_GET_LAST(buf_pool->flush_list);
-			    || bpage->oldest_modification >= lsn_limit) {
-			/* We have flushed enough */
-			ut_ad(bpage->in_flush_list);
-
-		/* Note that after finding a single flushable page, we try to
-		flush also all its neighbors, and after that start from the
-		END of the LRU list or flush list again: the list may change
-		during the flushing and we cannot safely preserve within this
-		function a pointer to a block in the list! */
-			mutex_t*	block_mutex = buf_page_get_mutex(bpage);
-			ut_a(buf_page_in_file(bpage));
-			mutex_enter(block_mutex);
-			ready = buf_flush_ready_for_flush(bpage, flush_type);
-			mutex_exit(block_mutex);
-			space = buf_page_get_space(bpage);
-			offset = buf_page_get_page_no(bpage);
-			buf_pool_mutex_exit();
-			old_page_count = page_count;
-			/* Try to flush also all the neighbors */
-			page_count += buf_flush_try_neighbors(
-				space, offset, flush_type);
-			"Flush type %lu, page no %lu, neighb %lu\n",
-			page_count - old_page_count); */
-			buf_pool_mutex_enter();
-		} else if (flush_type == BUF_FLUSH_LRU) {
-			bpage = UT_LIST_GET_PREV(LRU, bpage);
-			ut_ad(flush_type == BUF_FLUSH_LIST);
-			bpage = UT_LIST_GET_PREV(list, bpage);
-			ut_ad(!bpage || bpage->in_flush_list);
-		} while (bpage != NULL);
-
-		/* If we could not find anything to flush, leave the loop */
-		done_with_loop = true;
-
-	buf_pool->init_flush[flush_type] = FALSE;
-
-	if (buf_pool->n_flush[flush_type] == 0) {
-
-		/* The running flush batch has ended */
-		os_event_set(buf_pool->no_flush[flush_type]);
-	}
-
-	buf_pool_mutex_exit();
-
-	buf_flush_buffered_writes();
+	buf_pool_mutex_enter(buf_pool);
+
+	/* Note: The buffer pool mutex is released and reacquired within
+	the flush functions. */
+	switch(flush_type) {
+	case BUF_FLUSH_LRU:
+		count = buf_flush_LRU_list_batch(buf_pool, min_n);
+		break;
+	case BUF_FLUSH_LIST:
+		count = buf_flush_flush_list_batch(buf_pool, min_n, lsn_limit);
+		break;
+	default:
+		ut_error;
+	}
+
+	buf_pool_mutex_exit(buf_pool);
+
+	buf_flush_buffered_writes();
+
+#ifdef UNIV_DEBUG
+	if (buf_debug_prints && count > 0) {
+		fprintf(stderr, flush_type == BUF_FLUSH_LRU
+			? "Flushed %lu pages in LRU flush\n"
+			: "Flushed %lu pages in flush list flush\n",
+			(ulong) count);
+	}
+#endif /* UNIV_DEBUG */
+
+	srv_buf_pool_flushed += count;
+/******************************************************************//**
+Gather the aggregated stats for both flush list and LRU list flushing */
+	enum buf_flush	flush_type,	/*!< in: type of flush */
+	ulint		page_count)	/*!< in: number of pages flushed */
+	buf_flush_buffered_writes();
+
+	ut_a(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST);
+
 #ifdef UNIV_DEBUG
 	if (buf_debug_prints && page_count > 0) {
-		ut_a(flush_type == BUF_FLUSH_LRU
-		     || flush_type == BUF_FLUSH_LIST);
 		fprintf(stderr, flush_type == BUF_FLUSH_LRU
 			? "Flushed %lu pages in LRU flush\n"
 			: "Flushed %lu pages in flush list flush\n",
 			(ulong) page_count);
 	}
 #endif /* UNIV_DEBUG */

 	srv_buf_pool_flushed += page_count;

-	/* We keep track of all flushes happening as part of LRU
-	flush. When estimating the desired rate at which flush_list
-	should be flushed we factor in this value. */
 	if (flush_type == BUF_FLUSH_LRU) {
+		/* We keep track of all flushes happening as part of LRU
+		flush. When estimating the desired rate at which flush_list
+		should be flushed we factor in this value. */
 		buf_lru_flush_page_count += page_count;
+/******************************************************************//**
+Start a buffer flush batch for LRU or flush list */
+	buf_pool_t*	buf_pool,	/*!< buffer pool instance */
+	enum buf_flush	flush_type)	/*!< in: BUF_FLUSH_LRU
+					or BUF_FLUSH_LIST */
+	buf_pool_mutex_enter(buf_pool);
+
+	if (buf_pool->n_flush[flush_type] > 0
+	   || buf_pool->init_flush[flush_type] == TRUE) {
+
+		/* There is already a flush batch of the same type running */
+		buf_pool_mutex_exit(buf_pool);
+
+		return(FALSE);
+	}
+
+	buf_pool->init_flush[flush_type] = TRUE;
+
+	buf_pool_mutex_exit(buf_pool);
+
+	return(TRUE);
+/******************************************************************//**
+End a buffer flush batch for LRU or flush list */
+	buf_pool_t*	buf_pool,	/*!< buffer pool instance */
+	enum buf_flush	flush_type)	/*!< in: BUF_FLUSH_LRU
+					or BUF_FLUSH_LIST */
+	buf_pool_mutex_enter(buf_pool);
+
+	buf_pool->init_flush[flush_type] = FALSE;
+
+	if (buf_pool->n_flush[flush_type] == 0) {
+
+		/* The running flush batch has ended */
+		os_event_set(buf_pool->no_flush[flush_type]);
+	}
+
+	buf_pool_mutex_exit(buf_pool);
 /******************************************************************//**
 buf_flush_wait_batch_end(
 /*=====================*/
-	enum buf_flush	type)	/*!< in: BUF_FLUSH_LRU or BUF_FLUSH_LIST */
-	ut_ad((type == BUF_FLUSH_LRU) || (type == BUF_FLUSH_LIST));
-	os_event_wait(buf_pool->no_flush[type]);
+	buf_pool_t*	buf_pool,	/*!< buffer pool instance */
+	enum buf_flush	type)		/*!< in: BUF_FLUSH_LRU
+					or BUF_FLUSH_LIST */
+	ut_ad(type == BUF_FLUSH_LRU || type == BUF_FLUSH_LIST);
+
+	if (buf_pool == NULL) {
+		ulint	i;
+
+		for (i = 0; i < srv_buf_pool_instances; ++i) {
+			buf_pool_t*	buf_pool;
+
+			buf_pool = buf_pool_from_array(i);
+
+			os_event_wait(buf_pool->no_flush[type]);
+		}
+	} else {
+		os_event_wait(buf_pool->no_flush[type]);
+	}
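
New with this patch, passing buf_pool == NULL waits on every instance. A hedged usage sketch, using only the signature shown above:

/* Usage sketch (not patch code): NULL selects every instance. */
static void
wait_for_lru_batches_sketch(buf_pool_t* buf_pool)
{
	/* Wait for LRU batches in all buffer pool instances: */
	buf_flush_wait_batch_end(NULL, BUF_FLUSH_LRU);

	/* Or wait on a single instance, as buf_flush_free_margin()
	further below does when buf_flush_LRU() returns
	ULINT_UNDEFINED: */
	buf_flush_wait_batch_end(buf_pool, BUF_FLUSH_LRU);
}
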
+/*******************************************************************//**
+This utility flushes dirty blocks from the end of the LRU list.
+NOTE: The calling thread may own latches to pages: to avoid deadlocks,
+this function must be written so that it cannot end up waiting for these
+latches!
+@return number of blocks for which the write request was queued;
+ULINT_UNDEFINED if there was a flush of the same type already running */
+	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
+	ulint		min_n)		/*!< in: wished minimum number of blocks
+					flushed (it is not guaranteed that the
+					actual number is that big, though) */
+	if (!buf_flush_start(buf_pool, BUF_FLUSH_LRU)) {
+		return(ULINT_UNDEFINED);
+	}
+
+	page_count = buf_flush_batch(buf_pool, BUF_FLUSH_LRU, min_n, 0);
+
+	buf_flush_end(buf_pool, BUF_FLUSH_LRU);
+
+	buf_flush_common(BUF_FLUSH_LRU, page_count);
+/*******************************************************************//**
+This utility flushes dirty blocks from the end of the flush list of
+all buffer pool instances.
+NOTE: The calling thread is not allowed to own any latches on pages!
+@return number of blocks for which the write request was queued;
+ULINT_UNDEFINED if there was a flush of the same type already running */
+	ulint		min_n,		/*!< in: wished minimum number of blocks
+					flushed (it is not guaranteed that the
+					actual number is that big, though) */
+	ib_uint64_t	lsn_limit)	/*!< in: in the case of BUF_FLUSH_LIST
+					all blocks whose oldest_modification is
+					smaller than this should be flushed
+					(if their number does not exceed
+					min_n), otherwise ignored */
+	ulint		i;
+	ulint		total_page_count = 0;
+	ibool		skipped = FALSE;
+
+	if (min_n != ULINT_MAX) {
+		/* Ensure that flushing is spread evenly amongst the
+		buffer pool instances. When min_n is ULINT_MAX
+		we need to flush everything up to the lsn limit
+		so no limit here. */
+		min_n = (min_n + srv_buf_pool_instances - 1)
+			/ srv_buf_pool_instances;
+	}
+
+	/* Flush to lsn_limit in all buffer pool instances */
+	for (i = 0; i < srv_buf_pool_instances; i++) {
+		buf_pool_t*	buf_pool;
+		ulint		page_count = 0;
+
+		buf_pool = buf_pool_from_array(i);
+
+		if (!buf_flush_start(buf_pool, BUF_FLUSH_LIST)) {
+			/* We have two choices here. If lsn_limit was
+			specified then skipping an instance of buffer
+			pool means we cannot guarantee that all pages
+			up to lsn_limit have been flushed. We can
+			return right now with failure or we can try
+			to flush remaining buffer pools up to the
+			lsn_limit. We attempt to flush other buffer
+			pools based on the assumption that it will
+			help in the retry which will follow the
+			current one. */
+			skipped = TRUE;
+
+			continue;
+		}
+
+		page_count = buf_flush_batch(
+			buf_pool, BUF_FLUSH_LIST, min_n, lsn_limit);
+
+		buf_flush_end(buf_pool, BUF_FLUSH_LIST);
+
+		buf_flush_common(BUF_FLUSH_LIST, page_count);
+
+		total_page_count += page_count;
+	}
+
+	return(lsn_limit != IB_ULONGLONG_MAX && skipped
+	       ? ULINT_UNDEFINED : total_page_count);
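
The quota split above is a ceiling division, so the aggregate may slightly overshoot min_n but never undershoots it because of the split alone. A worked example with invented numbers; the helper name is hypothetical:

/* Worked example of the per-instance quota (invented numbers): */
static void
min_n_split_example(void)
{
	ulint	min_n     = 100;
	ulint	instances = 8;	/* stands in for srv_buf_pool_instances */
	ulint	per_pool  = (min_n + instances - 1) / instances;

	ut_a(per_pool == 13);			/* ceil(100 / 8) */
	ut_a(per_pool * instances >= min_n);	/* 104 >= 100 */
}
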
 /******************************************************************//**
 Gives a recommendation of how many blocks should be flushed to establish
 a big enough margin of replaceable blocks near the end of the LRU list

 immediately, without waiting. */
-buf_flush_free_margin(void)
-/*=======================*/
+buf_flush_free_margin(
+/*==================*/
+	buf_pool_t*	buf_pool)	/*!< in: Buffer pool instance */
 	ulint	n_to_flush;

-	n_to_flush = buf_flush_LRU_recommendation();
+	n_to_flush = buf_flush_LRU_recommendation(buf_pool);

 	if (n_to_flush > 0) {
-		n_flushed = buf_flush_batch(BUF_FLUSH_LRU, n_to_flush, 0);
+		ulint	n_flushed;
+
+		n_flushed = buf_flush_LRU(buf_pool, n_to_flush);

 		if (n_flushed == ULINT_UNDEFINED) {
 			/* There was an LRU type flush batch already running;
 			let us wait for it to end */
-			buf_flush_wait_batch_end(BUF_FLUSH_LRU);
+			buf_flush_wait_batch_end(buf_pool, BUF_FLUSH_LRU);
+/*********************************************************************//**
+Flushes pages from the end of all the LRU lists. */
+buf_flush_free_margins(void)
+/*========================*/
+	ulint	i;
+
+	for (i = 0; i < srv_buf_pool_instances; i++) {
+		buf_pool_t*	buf_pool;
+
+		buf_pool = buf_pool_from_array(i);
+
+		buf_flush_free_margin(buf_pool);
+	}
 /*********************************************************************
 Update the historical stats that we are collecting for flush rate
 heuristics at the end of each interval.