~ubuntu-branches/ubuntu/quantal/linux-linaro-mx51/quantal

Viewing changes to mm/vmscan.c

  • Committer: Package Import Robot
  • Author(s): John Rigby
  • Date: 2011-09-26 10:44:23 UTC
  • Revision ID: package-import@ubuntu.com-20110926104423-3o58a3c1bj7x00rs

Tags: 3.0.0-1007.9

[ John Rigby ]

Enable crypto modules and remove crypto-modules from exclude-module
files (LP: #826021)

--- mm/vmscan.c
+++ mm/vmscan.c
@@ -42,6 +42,7 @@
 #include <linux/delayacct.h>
 #include <linux/sysctl.h>
 #include <linux/oom.h>
+#include <linux/prefetch.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -172,7 +173,7 @@
                                 struct scan_control *sc, enum lru_list lru)
 {
         if (!scanning_global_lru(sc))
-                return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru);
+                return mem_cgroup_zone_nr_lru_pages(sc->mem_cgroup, zone, lru);
 
         return zone_page_state(zone, NR_LRU_BASE + lru);
 }
@@ -201,6 +202,14 @@
 }
 EXPORT_SYMBOL(unregister_shrinker);
 
+static inline int do_shrinker_shrink(struct shrinker *shrinker,
+                                     struct shrink_control *sc,
+                                     unsigned long nr_to_scan)
+{
+        sc->nr_to_scan = nr_to_scan;
+        return (*shrinker->shrink)(shrinker, sc);
+}
+
 #define SHRINK_BATCH 128
 /*
  * Call the shrink functions to age shrinkable caches
@@ -221,14 +230,15 @@
  *
  * Returns the number of slab objects which we shrunk.
  */
-unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
-                        unsigned long lru_pages)
+unsigned long shrink_slab(struct shrink_control *shrink,
+                          unsigned long nr_pages_scanned,
+                          unsigned long lru_pages)
 {
         struct shrinker *shrinker;
         unsigned long ret = 0;
 
-        if (scanned == 0)
-                scanned = SWAP_CLUSTER_MAX;
+        if (nr_pages_scanned == 0)
+                nr_pages_scanned = SWAP_CLUSTER_MAX;
 
         if (!down_read_trylock(&shrinker_rwsem)) {
                 /* Assume we'll be able to shrink next time */
@@ -241,8 +251,8 @@
                 unsigned long total_scan;
                 unsigned long max_pass;
 
-                max_pass = (*shrinker->shrink)(shrinker, 0, gfp_mask);
-                delta = (4 * scanned) / shrinker->seeks;
+                max_pass = do_shrinker_shrink(shrinker, shrink, 0);
+                delta = (4 * nr_pages_scanned) / shrinker->seeks;
                 delta *= max_pass;
                 do_div(delta, lru_pages + 1);
                 shrinker->nr += delta;
@@ -269,9 +279,9 @@
                         int shrink_ret;
                         int nr_before;
 
-                        nr_before = (*shrinker->shrink)(shrinker, 0, gfp_mask);
-                        shrink_ret = (*shrinker->shrink)(shrinker, this_scan,
-                                                                gfp_mask);
+                        nr_before = do_shrinker_shrink(shrinker, shrink, 0);
+                        shrink_ret = do_shrinker_shrink(shrinker, shrink,
+                                                        this_scan);
                         if (shrink_ret == -1)
                                 break;
                         if (shrink_ret < nr_before)
@@ -1114,8 +1124,20 @@
                                         nr_lumpy_dirty++;
                                 scan++;
                         } else {
-                                /* the page is freed already. */
-                                if (!page_count(cursor_page))
+                                /*
+                                 * Check if the page is freed already.
+                                 *
+                                 * We can't use page_count() as that
+                                 * requires compound_head and we don't
+                                 * have a pin on the page here. If a
+                                 * page is tail, we may or may not
+                                 * have isolated the head, so assume
+                                 * it's not free, it'd be tricky to
+                                 * track the head status without a
+                                 * page pin.
+                                 */
+                                if (!PageTail(cursor_page) &&
+                                    !atomic_read(&cursor_page->_count))
                                         continue;
                                 break;
                         }
@@ -1206,13 +1228,16 @@
 {
         int ret = -EBUSY;
 
+        VM_BUG_ON(!page_count(page));
+
         if (PageLRU(page)) {
                 struct zone *zone = page_zone(page);
 
                 spin_lock_irq(&zone->lru_lock);
-                if (PageLRU(page) && get_page_unless_zero(page)) {
+                if (PageLRU(page)) {
                         int lru = page_lru(page);
                         ret = 0;
+                        get_page(page);
                         ClearPageLRU(page);
 
                         del_page_from_lru_list(zone, page, lru);
@@ -1705,26 +1730,6 @@
 }
 
 /*
- * Smallish @nr_to_scan's are deposited in @nr_saved_scan,
- * until we collected @swap_cluster_max pages to scan.
- */
-static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
-                                       unsigned long *nr_saved_scan)
-{
-        unsigned long nr;
-
-        *nr_saved_scan += nr_to_scan;
-        nr = *nr_saved_scan;
-
-        if (nr >= SWAP_CLUSTER_MAX)
-                *nr_saved_scan = 0;
-        else
-                nr = 0;
-
-        return nr;
-}
-
-/*
  * Determine how aggressively the anon and file LRU lists should be
  * scanned.  The relative value of each set of LRU lists is determined
  * by looking at the fraction of the pages scanned we did rotate back
@@ -1742,6 +1747,22 @@
         u64 fraction[2], denominator;
         enum lru_list l;
         int noswap = 0;
+        int force_scan = 0;
+
+
+        anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
+                zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
+        file  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
+                zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
+
+        if (((anon + file) >> priority) < SWAP_CLUSTER_MAX) {
+                /* kswapd does zone balancing and need to scan this zone */
+                if (scanning_global_lru(sc) && current_is_kswapd())
+                        force_scan = 1;
+                /* memcg may have small limit and need to avoid priority drop */
+                if (!scanning_global_lru(sc))
+                        force_scan = 1;
+        }
 
         /* If we have no swap space, do not bother scanning anon pages. */
         if (!sc->may_swap || (nr_swap_pages <= 0)) {
@@ -1752,11 +1773,6 @@
                 goto out;
         }
 
-        anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
-                zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
-        file  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
-                zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
-
         if (scanning_global_lru(sc)) {
                 free  = zone_page_state(zone, NR_FREE_PAGES);
                 /* If we have very few page cache pages,
@@ -1823,8 +1839,23 @@
                         scan >>= priority;
                         scan = div64_u64(scan * fraction[file], denominator);
                 }
-                nr[l] = nr_scan_try_batch(scan,
-                                          &reclaim_stat->nr_saved_scan[l]);
+
+                /*
+                 * If zone is small or memcg is small, nr[l] can be 0.
+                 * This results no-scan on this priority and priority drop down.
+                 * For global direct reclaim, it can visit next zone and tend
+                 * not to have problems. For global kswapd, it's for zone
+                 * balancing and it need to scan a small amounts. When using
+                 * memcg, priority drop can cause big latency. So, it's better
+                 * to scan small amount. See may_noscan above.
+                 */
+                if (!scan && force_scan) {
+                        if (file)
+                                scan = SWAP_CLUSTER_MAX;
+                        else if (!noswap)
+                                scan = SWAP_CLUSTER_MAX;
+                }
+                nr[l] = scan;
         }
 }
 
@@ -1969,6 +2000,8 @@
 {
         struct zoneref *z;
         struct zone *zone;
+        unsigned long nr_soft_reclaimed;
+        unsigned long nr_soft_scanned;
 
         for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                         gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -1983,6 +2016,19 @@
                                 continue;
                         if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                                 continue;       /* Let kswapd poll it */
+                        /*
+                         * This steals pages from memory cgroups over softlimit
+                         * and returns the number of reclaimed pages and
+                         * scanned pages. This works for global memory pressure
+                         * and balancing, not for a memcg's limit.
+                         */
+                        nr_soft_scanned = 0;
+                        nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+                                                sc->order, sc->gfp_mask,
+                                                &nr_soft_scanned);
+                        sc->nr_reclaimed += nr_soft_reclaimed;
+                        sc->nr_scanned += nr_soft_scanned;
+                        /* need some check for avoid more shrink_zone() */
                 }
 
                 shrink_zone(priority, zone, sc);
@@ -2031,7 +2077,8 @@
  *              else, the number of pages reclaimed
  */
 static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
-                                        struct scan_control *sc)
+                                        struct scan_control *sc,
+                                        struct shrink_control *shrink)
 {
         int priority;
         unsigned long total_scanned = 0;
@@ -2049,7 +2096,7 @@
         for (priority = DEF_PRIORITY; priority >= 0; priority--) {
                 sc->nr_scanned = 0;
                 if (!priority)
-                        disable_swap_token();
+                        disable_swap_token(sc->mem_cgroup);
                 shrink_zones(priority, zonelist, sc);
                 /*
                  * Don't shrink slabs when reclaiming memory from
@@ -2065,7 +2112,7 @@
                                 lru_pages += zone_reclaimable_pages(zone);
                         }
 
-                        shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
+                        shrink_slab(shrink, sc->nr_scanned, lru_pages);
                         if (reclaim_state) {
                                 sc->nr_reclaimed += reclaim_state->reclaimed_slab;
                                 reclaim_state->reclaimed_slab = 0;
@@ -2137,12 +2184,15 @@
                 .mem_cgroup = NULL,
                 .nodemask = nodemask,
         };
+        struct shrink_control shrink = {
+                .gfp_mask = sc.gfp_mask,
+        };
 
         trace_mm_vmscan_direct_reclaim_begin(order,
                                 sc.may_writepage,
                                 gfp_mask);
 
-        nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+        nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
 
         trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
 
@@ -2154,9 +2204,11 @@
 unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
                                                 gfp_t gfp_mask, bool noswap,
                                                 unsigned int swappiness,
-                                                struct zone *zone)
+                                                struct zone *zone,
+                                                unsigned long *nr_scanned)
 {
         struct scan_control sc = {
+                .nr_scanned = 0,
                 .nr_to_reclaim = SWAP_CLUSTER_MAX,
                 .may_writepage = !laptop_mode,
                 .may_unmap = 1,
@@ -2165,6 +2217,7 @@
                 .order = 0,
                 .mem_cgroup = mem,
         };
+
         sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
                         (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
 
@@ -2183,6 +2236,7 @@
 
         trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
 
+        *nr_scanned = sc.nr_scanned;
         return sc.nr_reclaimed;
 }
 
@@ -2193,6 +2247,7 @@
 {
         struct zonelist *zonelist;
         unsigned long nr_reclaimed;
+        int nid;
         struct scan_control sc = {
                 .may_writepage = !laptop_mode,
                 .may_unmap = 1,
@@ -2202,17 +2257,27 @@
                 .order = 0,
                 .mem_cgroup = mem_cont,
                 .nodemask = NULL, /* we don't care the placement */
-        };
-
-        sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
-                        (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
-        zonelist = NODE_DATA(numa_node_id())->node_zonelists;
+                .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
+                                (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
+        };
+        struct shrink_control shrink = {
+                .gfp_mask = sc.gfp_mask,
+        };
+
+        /*
+         * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
+         * take care of from where we get pages. So the node where we start the
+         * scan does not need to be the current node.
+         */
+        nid = mem_cgroup_select_victim_node(mem_cont);
+
+        zonelist = NODE_DATA(nid)->node_zonelists;
 
         trace_mm_vmscan_memcg_reclaim_begin(0,
                                             sc.may_writepage,
                                             sc.gfp_mask);
 
-        nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+        nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
 
         trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
 
@@ -2245,7 +2310,8 @@
         for (i = 0; i <= classzone_idx; i++)
                 present_pages += pgdat->node_zones[i].present_pages;
 
-        return balanced_pages > (present_pages >> 2);
+        /* A special case here: if zone has no page, we think it's balanced */
+        return balanced_pages >= (present_pages >> 2);
 }
 
 /* is kswapd sleeping prematurely? */
@@ -2261,7 +2327,7 @@
                 return true;
 
         /* Check the watermark levels */
-        for (i = 0; i < pgdat->nr_zones; i++) {
+        for (i = 0; i <= classzone_idx; i++) {
                 struct zone *zone = pgdat->node_zones + i;
 
                 if (!populated_zone(zone))
@@ -2279,7 +2345,7 @@
                 }
 
                 if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
-                                                        classzone_idx, 0))
+                                                        i, 0))
                         all_zones_ok = false;
                 else
                         balanced += zone->present_pages;
@@ -2327,6 +2393,8 @@
         int end_zone = 0;       /* Inclusive.  0 = ZONE_DMA */
         unsigned long total_scanned;
         struct reclaim_state *reclaim_state = current->reclaim_state;
+        unsigned long nr_soft_reclaimed;
+        unsigned long nr_soft_scanned;
         struct scan_control sc = {
                 .gfp_mask = GFP_KERNEL,
                 .may_unmap = 1,
@@ -2340,6 +2408,9 @@
                 .order = order,
                 .mem_cgroup = NULL,
         };
+        struct shrink_control shrink = {
+                .gfp_mask = sc.gfp_mask,
+        };
 loop_again:
         total_scanned = 0;
         sc.nr_reclaimed = 0;
@@ -2352,7 +2423,7 @@
 
                 /* The swap token gets in the way of swapout... */
                 if (!priority)
-                        disable_swap_token();
+                        disable_swap_token(NULL);
 
                 all_zones_ok = 1;
                 balanced = 0;
@@ -2381,7 +2452,6 @@
                         if (!zone_watermark_ok_safe(zone, order,
                                         high_wmark_pages(zone), 0, 0)) {
                                 end_zone = i;
-                                *classzone_idx = i;
                                 break;
                         }
                 }
@@ -2416,11 +2486,15 @@
 
                         sc.nr_scanned = 0;
 
+                        nr_soft_scanned = 0;
                         /*
                          * Call soft limit reclaim before calling shrink_zone.
-                         * For now we ignore the return value
                          */
-                        mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask);
+                        nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+                                                        order, sc.gfp_mask,
+                                                        &nr_soft_scanned);
+                        sc.nr_reclaimed += nr_soft_reclaimed;
+                        total_scanned += nr_soft_scanned;
 
                         /*
                          * We put equal pressure on every zone, unless
@@ -2436,19 +2510,18 @@
                                 KSWAPD_ZONE_BALANCE_GAP_RATIO);
                         if (!zone_watermark_ok_safe(zone, order,
                                         high_wmark_pages(zone) + balance_gap,
-                                        end_zone, 0))
+                                        end_zone, 0)) {
                                 shrink_zone(priority, zone, &sc);
-                        reclaim_state->reclaimed_slab = 0;
-                        nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
-                                                lru_pages);
-                        sc.nr_reclaimed += reclaim_state->reclaimed_slab;
-                        total_scanned += sc.nr_scanned;
-
-                        if (zone->all_unreclaimable)
-                                continue;
-                        if (nr_slab == 0 &&
-                            !zone_reclaimable(zone))
-                                zone->all_unreclaimable = 1;
+
+                                reclaim_state->reclaimed_slab = 0;
+                                nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
+                                sc.nr_reclaimed += reclaim_state->reclaimed_slab;
+                                total_scanned += sc.nr_scanned;
+
+                                if (nr_slab == 0 && !zone_reclaimable(zone))
+                                        zone->all_unreclaimable = 1;
+                        }
+
                         /*
                          * If we've done a decent amount of scanning and
                          * the reclaim ratio is low, start doing writepage
@@ -2458,6 +2531,12 @@
                             total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
                                 sc.may_writepage = 1;
 
+                        if (zone->all_unreclaimable) {
+                                if (end_zone && end_zone == i)
+                                        end_zone--;
+                                continue;
+                        }
+
                         if (!zone_watermark_ok_safe(zone, order,
                                         high_wmark_pages(zone), end_zone, 0)) {
                                 all_zones_ok = 0;
@@ -2636,8 +2715,8 @@
  */
 static int kswapd(void *p)
 {
-        unsigned long order;
-        int classzone_idx;
+        unsigned long order, new_order;
+        int classzone_idx, new_classzone_idx;
         pg_data_t *pgdat = (pg_data_t*)p;
         struct task_struct *tsk = current;
 
@@ -2667,17 +2746,23 @@
         tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
         set_freezable();
 
-        order = 0;
-        classzone_idx = MAX_NR_ZONES - 1;
+        order = new_order = 0;
+        classzone_idx = new_classzone_idx = pgdat->nr_zones - 1;
         for ( ; ; ) {
-                unsigned long new_order;
-                int new_classzone_idx;
                 int ret;
 
-                new_order = pgdat->kswapd_max_order;
-                new_classzone_idx = pgdat->classzone_idx;
-                pgdat->kswapd_max_order = 0;
-                pgdat->classzone_idx = MAX_NR_ZONES - 1;
+                /*
+                 * If the last balance_pgdat was unsuccessful it's unlikely a
+                 * new request of a similar or harder type will succeed soon
+                 * so consider going to sleep on the basis we reclaimed at
+                 */
+                if (classzone_idx >= new_classzone_idx && order == new_order) {
+                        new_order = pgdat->kswapd_max_order;
+                        new_classzone_idx = pgdat->classzone_idx;
+                        pgdat->kswapd_max_order =  0;
+                        pgdat->classzone_idx = pgdat->nr_zones - 1;
+                }
+
                 if (order < new_order || classzone_idx > new_classzone_idx) {
                         /*
                          * Don't sleep if someone wants a larger 'order'
@@ -2690,7 +2775,7 @@
                         order = pgdat->kswapd_max_order;
                         classzone_idx = pgdat->classzone_idx;
                         pgdat->kswapd_max_order = 0;
-                        pgdat->classzone_idx = MAX_NR_ZONES - 1;
+                        pgdat->classzone_idx = pgdat->nr_zones - 1;
                 }
 
                 ret = try_to_freeze();
@@ -2792,7 +2877,10 @@
                 .swappiness = vm_swappiness,
                 .order = 0,
         };
-        struct zonelist * zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
+        struct shrink_control shrink = {
+                .gfp_mask = sc.gfp_mask,
+        };
+        struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
         struct task_struct *p = current;
         unsigned long nr_reclaimed;
 
@@ -2801,7 +2889,7 @@
         reclaim_state.reclaimed_slab = 0;
         p->reclaim_state = &reclaim_state;
 
-        nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+        nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
 
         p->reclaim_state = NULL;
         lockdep_clear_current_reclaim_state();
@@ -2976,6 +3064,9 @@
                 .swappiness = vm_swappiness,
                 .order = order,
         };
+        struct shrink_control shrink = {
+                .gfp_mask = sc.gfp_mask,
+        };
         unsigned long nr_slab_pages0, nr_slab_pages1;
 
         cond_resched();
@@ -3017,7 +3108,7 @@
                         unsigned long lru_pages = zone_reclaimable_pages(zone);
 
                         /* No reclaimable slab or very low memory pressure */
-                        if (!shrink_slab(sc.nr_scanned, gfp_mask, lru_pages))
+                        if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages))
                                 break;
 
                         /* Freed enough memory */
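
The recurring change in the hunks above is the new calling convention for shrink_slab(): callers bundle reclaim parameters into a struct shrink_control and shrinkers receive it through do_shrinker_shrink(). The following is a minimal user-space sketch of that pattern, not kernel code; the toy_shrink callback, the cached_objects counter, and the simplified struct fields are stand-ins for illustration only.

#include <stdio.h>

/* Simplified stand-in for the kernel's struct shrink_control. */
struct shrink_control {
	unsigned int gfp_mask;      /* stand-in for gfp_t */
	unsigned long nr_to_scan;   /* filled in by do_shrinker_shrink() */
};

struct shrinker {
	/* nr_to_scan == 0 means "report how many objects you cache" */
	long (*shrink)(struct shrinker *, struct shrink_control *);
	int seeks;
};

/* Mirrors the helper added at new lines 205-211 of the diff. */
static inline long do_shrinker_shrink(struct shrinker *shrinker,
				      struct shrink_control *sc,
				      unsigned long nr_to_scan)
{
	sc->nr_to_scan = nr_to_scan;
	return shrinker->shrink(shrinker, sc);
}

/* Toy shrinker: pretends to cache 1000 objects and frees what it is asked to. */
static unsigned long cached_objects = 1000;

static long toy_shrink(struct shrinker *s, struct shrink_control *sc)
{
	(void)s;
	if (sc->nr_to_scan == 0)
		return (long)cached_objects;        /* query pass: just report */
	if (sc->nr_to_scan > cached_objects)
		sc->nr_to_scan = cached_objects;
	cached_objects -= sc->nr_to_scan;           /* "free" that many objects */
	return (long)cached_objects;
}

int main(void)
{
	struct shrinker toy = { .shrink = toy_shrink, .seeks = 2 };
	/* Callers in the diff build this from sc.gfp_mask before reclaim. */
	struct shrink_control shrink = { .gfp_mask = 0 };

	long before = do_shrinker_shrink(&toy, &shrink, 0);    /* query */
	long after  = do_shrinker_shrink(&toy, &shrink, 128);  /* scan a batch */

	printf("objects before: %ld, after scanning a batch: %ld\n", before, after);
	return 0;
}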