#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>
#include <linux/stacktrace.h>
#include <trace/events/kmem.h>
* The slab_lock protects operations on the objects of a particular
* slab and its metadata in the page struct. If the slab lock
* has been taken then no allocations nor frees can be performed
* on the objects in the slab nor can the slab be added or removed
* from the partial or full lists since this would mean modifying
* the page_struct of the slab.
* 1. slub_lock (Global Semaphore)
* 2. node->list_lock
* 3. slab_lock(page) (Only on some arches and for debugging)
*
* The role of the slub_lock is to protect the list of all the slabs
* and to synchronize major metadata changes to slab cache structures.
*
* The slab_lock is only used for debugging and on arches that do not
* have the ability to do a cmpxchg_double. It only protects the second
* double word in the page struct. Meaning
*   A. page->freelist -> List of free objects in a page
*   B. page->counters -> Counters of objects
*   C. page->frozen   -> frozen state
*
* If a slab is frozen then it is exempt from list management. It is not
* on any list. The processor that froze the slab is the one who can
* perform list operations on the page. Other processors may put objects
* onto the freelist but the processor that froze the slab is the only
* one that can retrieve the objects from the page's freelist.
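The freelist ownership rule described above can be sketched outside the kernel. The following is a minimal user-space C11 model, not SLUB code: any thread may push a freed object onto the slab freelist with a compare-and-swap, while only the thread that set the (purely illustrative) frozen flag detaches the accumulated objects for its own use. The real code also folds the counters update into one cmpxchg_double; the model omits that.

/* Minimal user-space model of the "frozen slab" rule (C11 atomics).
 * Names (model_slab, model_object, freeze_and_take) are hypothetical,
 * not SLUB identifiers.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct model_object {
    struct model_object *next;
};

struct model_slab {
    _Atomic(struct model_object *) freelist; /* any CPU may push here */
    atomic_bool frozen;                      /* set by the owning CPU */
};

/* Remote free: any thread may do this, even while the slab is frozen. */
static void remote_free(struct model_slab *s, struct model_object *obj)
{
    struct model_object *old = atomic_load(&s->freelist);

    do {
        obj->next = old;
    } while (!atomic_compare_exchange_weak(&s->freelist, &old, obj));
}

/* Only the thread that froze the slab detaches the accumulated objects. */
static struct model_object *freeze_and_take(struct model_slab *s)
{
    atomic_store(&s->frozen, true);
    return atomic_exchange(&s->freelist, NULL);
}

int main(void)
{
    struct model_slab slab = { .freelist = NULL, .frozen = false };
    struct model_object a, b;

    remote_free(&slab, &a);
    remote_free(&slab, &b);

    for (struct model_object *o = freeze_and_take(&slab); o; o = o->next)
        printf("took object %p\n", (void *)o);
    return 0;
}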
* The list_lock protects the partial and full list on each node and
* the partial slab counter. If taken then no new slabs may be added or
return x.x & OO_MASK;

* Per slab locking using the pagelock
static __always_inline void slab_lock(struct page *page)
bit_spin_lock(PG_locked, &page->flags);
static __always_inline void slab_unlock(struct page *page)
__bit_spin_unlock(PG_locked, &page->flags);

/* Interrupts must be disabled (for the fallback code to work right) */
static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
void *freelist_old, unsigned long counters_old,
void *freelist_new, unsigned long counters_new,
VM_BUG_ON(!irqs_disabled());
#ifdef CONFIG_CMPXCHG_DOUBLE
if (s->flags & __CMPXCHG_DOUBLE) {
if (cmpxchg_double(&page->freelist,
freelist_old, counters_old,
freelist_new, counters_new))
if (page->freelist == freelist_old && page->counters == counters_old) {
page->freelist = freelist_new;
page->counters = counters_new;
stat(s, CMPXCHG_DOUBLE_FAIL);
#ifdef SLUB_DEBUG_CMPXCHG
printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);

static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
void *freelist_old, unsigned long counters_old,
void *freelist_new, unsigned long counters_new,
#ifdef CONFIG_CMPXCHG_DOUBLE
if (s->flags & __CMPXCHG_DOUBLE) {
if (cmpxchg_double(&page->freelist,
freelist_old, counters_old,
freelist_new, counters_new))
local_irq_save(flags);
if (page->freelist == freelist_old && page->counters == counters_old) {
page->freelist = freelist_new;
page->counters = counters_new;
local_irq_restore(flags);
local_irq_restore(flags);
stat(s, CMPXCHG_DOUBLE_FAIL);
#ifdef SLUB_DEBUG_CMPXCHG
printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
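For reference, the fallback branch above (used when the architecture cannot do a cmpxchg_double) can be modelled in ordinary user space. This is a sketch under assumed names, not kernel code: a pthread spinlock stands in for slab_lock()/PG_locked, and both words are compared and swapped under that lock, which is why the slab_lock is said to protect only this second double word. Build with -pthread.

/* User-space model of the lock-based fallback for cmpxchg_double_slab().
 * slab_meta and its fields are illustrative stand-ins for the second
 * double word of struct page (freelist + counters), not real kernel types.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct slab_meta {
    void *freelist;          /* word 1: first free object */
    unsigned long counters;  /* word 2: inuse/objects/frozen packed together */
    pthread_spinlock_t lock; /* stands in for slab_lock(page) */
};

static bool cmpxchg_double_fallback(struct slab_meta *m,
                                    void *freelist_old, unsigned long counters_old,
                                    void *freelist_new, unsigned long counters_new)
{
    bool ok = false;

    pthread_spin_lock(&m->lock);
    if (m->freelist == freelist_old && m->counters == counters_old) {
        /* Both words still match: publish the new pair atomically
         * with respect to other lockers. */
        m->freelist = freelist_new;
        m->counters = counters_new;
        ok = true;
    }
    pthread_spin_unlock(&m->lock);
    return ok; /* caller retries with a fresh snapshot on failure */
}

int main(void)
{
    struct slab_meta m = { .freelist = NULL, .counters = 0 };
    int obj;

    pthread_spin_init(&m.lock, PTHREAD_PROCESS_PRIVATE);
    printf("first try:   %d\n", cmpxchg_double_fallback(&m, NULL, 0, &obj, 1));
    printf("stale retry: %d\n", cmpxchg_double_fallback(&m, NULL, 0, &obj, 2));
    pthread_spin_destroy(&m.lock);
    return 0;
}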
#ifdef CONFIG_SLUB_DEBUG
* Determine a map of objects in use on a page.
* Slab lock or node listlock must be held to guarantee that the page does
* Node listlock must be held to guarantee that the page does
* not vanish from under us.
static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
* Per slab locking using the pagelock
static __always_inline void slab_lock(struct page *page)
bit_spin_lock(PG_locked, &page->flags);
static __always_inline void slab_unlock(struct page *page)
__bit_spin_unlock(PG_locked, &page->flags);
static __always_inline int slab_trylock(struct page *page)
rc = bit_spin_trylock(PG_locked, &page->flags);
* Management of partially allocated slabs
static void add_partial(struct kmem_cache_node *n,

* Management of partially allocated slabs.
* list_lock must be held.
static inline void add_partial(struct kmem_cache_node *n,
struct page *page, int tail)
spin_lock(&n->list_lock);
n->nr_partial++;
if (tail == DEACTIVATE_TO_TAIL)
list_add_tail(&page->lru, &n->partial);
list_add(&page->lru, &n->partial);
spin_unlock(&n->list_lock);
static inline void __remove_partial(struct kmem_cache_node *n,

* list_lock must be held.
static inline void remove_partial(struct kmem_cache_node *n,
struct page *page)
list_del(&page->lru);
n->nr_partial--;

static void remove_partial(struct kmem_cache *s, struct page *page)
struct kmem_cache_node *n = get_node(s, page_to_nid(page));
spin_lock(&n->list_lock);
__remove_partial(n, page);
spin_unlock(&n->list_lock);
* Lock slab and remove from the partial list.

* Lock slab, remove from the partial list and put the object into the
* Returns a list of objects or NULL if it fails.
* Must hold list_lock.
static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
static inline void *acquire_slab(struct kmem_cache *s,
struct kmem_cache_node *n, struct page *page,
if (slab_trylock(page)) {
__remove_partial(n, page);
__SetPageSlubFrozen(page);
unsigned long counters;
* Zap the freelist and set the frozen bit.
* The old freelist is the list of objects for the
* per cpu allocation list.
freelist = page->freelist;
counters = page->counters;
new.counters = counters;
new.inuse = page->objects;
VM_BUG_ON(new.frozen);
} while (!__cmpxchg_double_slab(s, page,
"lock and freeze"));
remove_partial(n, page);

static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
* Try to allocate a partial slab from a specific node.
static struct page *get_partial_node(struct kmem_cache_node *n)
static void *get_partial_node(struct kmem_cache *s,
struct kmem_cache_node *n, struct kmem_cache_cpu *c)
struct page *page, *page2;
void *object = NULL;
* Racy check. If we mistakenly see no partial slabs then we
spin_lock(&n->list_lock);
list_for_each_entry(page, &n->partial, lru)
if (lock_and_freeze_slab(n, page))
list_for_each_entry_safe(page, page2, &n->partial, lru) {
void *t = acquire_slab(s, n, page, object == NULL);
c->node = page_to_nid(page);
stat(s, ALLOC_FROM_PARTIAL);
available = page->objects - page->inuse;
available = put_cpu_partial(s, page, 0);
if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
spin_unlock(&n->list_lock);
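The available > s->cpu_partial / 2 test above is the 50% refill rule referred to later in this diff: get_partial_node() keeps acquiring slabs from the node partial list until the per-cpu stock holds more than half of cpu_partial objects. A toy stand-alone model of that stopping rule, using the 13 value that the 256-byte size class gets later in this patch and made-up per-slab free counts:

/* Toy model of the get_partial_node() stopping rule: keep taking partial
 * slabs until the objects now held per cpu exceed cpu_partial / 2.
 * The per-slab free counts below are invented for illustration.
 */
#include <stdio.h>

int main(void)
{
    int cpu_partial = 13;                /* the 256-byte class value shown below */
    int free_in_slab[] = { 3, 2, 5, 4 }; /* free objects per partial slab (made up) */
    int available = 0;

    for (unsigned i = 0; i < sizeof(free_in_slab) / sizeof(free_in_slab[0]); i++) {
        available += free_in_slab[i];    /* "acquire" one more slab */
        printf("acquired slab %u, available=%d\n", i, available);
        if (available > cpu_partial / 2) /* same test as in the hunk above */
            break;
    }
    return 0;
}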
* Get a page from somewhere. Search in increasing NUMA distances.
static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
struct kmem_cache_cpu *c)
#ifdef CONFIG_NUMA
struct zonelist *zonelist;
struct zoneref *z;
struct zone *zone;
enum zone_type high_zoneidx = gfp_zone(flags);
* The defrag ratio allows a configuration of the tradeoffs between
* Get a partial page, lock it and return it.
static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
struct kmem_cache_cpu *c)
int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
page = get_partial_node(get_node(s, searchnode));
if (page || node != NUMA_NO_NODE)
return get_any_partial(s, flags);
* Move a page back to the lists.
* Must be called with the slab lock held.
* On exit the slab lock will have been dropped.
static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
struct kmem_cache_node *n = get_node(s, page_to_nid(page));
__ClearPageSlubFrozen(page);
if (page->freelist) {
add_partial(n, page, tail);
stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
stat(s, DEACTIVATE_FULL);
if (kmem_cache_debug(s) && (s->flags & SLAB_STORE_USER))
stat(s, DEACTIVATE_EMPTY);
if (n->nr_partial < s->min_partial) {
* Adding an empty slab to the partial slabs in order
* to avoid page allocator overhead. This slab needs
* to come after the other slabs with objects in
* so that the others get filled first. That way the
* size of the partial list stays small.
* kmem_cache_shrink can reclaim any empty slabs from
add_partial(n, page, 1);
discard_slab(s, page);

object = get_partial_node(s, get_node(s, searchnode), c);
if (object || node != NUMA_NO_NODE)
return get_any_partial(s, flags, c);
#ifdef CONFIG_PREEMPT
for_each_possible_cpu(cpu)
per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
* Remove the cpu slab
static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
struct page *page = c->page;
struct kmem_cache_node *n = get_node(s, page_to_nid(page));
enum slab_modes l = M_NONE, m = M_NONE;
int tail = DEACTIVATE_TO_HEAD;
if (page->freelist) {
stat(s, DEACTIVATE_REMOTE_FREES);
* Merge cpu freelist into slab freelist. Typically we get here
* because both freelists are empty. So this is unlikely
while (unlikely(c->freelist)) {
tail = 0; /* Hot objects. Put the slab first */
/* Retrieve object from cpu_freelist */
object = c->freelist;
c->freelist = get_freepointer(s, c->freelist);
/* And put onto the regular freelist */
set_freepointer(s, object, page->freelist);
page->freelist = object;
tail = DEACTIVATE_TO_TAIL;
c->tid = next_tid(c->tid);
c->page = NULL;
c->tid = next_tid(c->tid);
unfreeze_slab(s, page, tail);
freelist = c->freelist;
* Stage one: Free all available per cpu objects back
* to the page freelist while it is still frozen. Leave the
* There is no need to take the list->lock because the page
while (freelist && (nextfree = get_freepointer(s, freelist))) {
unsigned long counters;
prior = page->freelist;
counters = page->counters;
set_freepointer(s, freelist, prior);
new.counters = counters;
VM_BUG_ON(!new.frozen);
} while (!__cmpxchg_double_slab(s, page,
freelist, new.counters,
"drain percpu freelist"));
freelist = nextfree;

* Stage two: Ensure that the page is unfrozen while the
* list presence reflects the actual number of objects
* We setup the list membership and then perform a cmpxchg
* with the count. If there is a mismatch then the page
* is not unfrozen but the page is on the wrong list.
* Then we restart the process which may have to remove
* the page from the list that we just put it on again
* because the number of objects in the slab may have
old.freelist = page->freelist;
old.counters = page->counters;
VM_BUG_ON(!old.frozen);
/* Determine target state of the slab */
new.counters = old.counters;
set_freepointer(s, freelist, old.freelist);
new.freelist = freelist;
new.freelist = old.freelist;
if (!new.inuse && n->nr_partial > s->min_partial)
else if (new.freelist) {
* Taking the spinlock removes the possibility
* that acquire_slab() will see a slab page that
spin_lock(&n->list_lock);
if (kmem_cache_debug(s) && !lock) {
* This also ensures that the scanning of full
* slabs from diagnostic functions will not see
spin_lock(&n->list_lock);
remove_partial(n, page);
else if (l == M_FULL)
remove_full(s, page);
if (m == M_PARTIAL) {
add_partial(n, page, tail);
} else if (m == M_FULL) {
stat(s, DEACTIVATE_FULL);
add_full(s, n, page);
if (!__cmpxchg_double_slab(s, page,
old.freelist, old.counters,
new.freelist, new.counters,
spin_unlock(&n->list_lock);
stat(s, DEACTIVATE_EMPTY);
discard_slab(s, page);
/* Unfreeze all the cpu partial slabs */
static void unfreeze_partials(struct kmem_cache *s)
struct kmem_cache_node *n = NULL;
struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
struct page *page, *discard_page = NULL;
while ((page = c->partial)) {
enum slab_modes { M_PARTIAL, M_FREE };
enum slab_modes l, m;
c->partial = page->next;
old.freelist = page->freelist;
old.counters = page->counters;
VM_BUG_ON(!old.frozen);
new.counters = old.counters;
new.freelist = old.freelist;
if (!new.inuse && (!n || n->nr_partial > s->min_partial))
struct kmem_cache_node *n2 = get_node(s,
spin_unlock(&n->list_lock);
spin_lock(&n->list_lock);
remove_partial(n, page);
add_partial(n, page,
DEACTIVATE_TO_TAIL);
} while (!cmpxchg_double_slab(s, page,
old.freelist, old.counters,
new.freelist, new.counters,
"unfreezing slab"));
page->next = discard_page;
discard_page = page;
spin_unlock(&n->list_lock);
while (discard_page) {
page = discard_page;
discard_page = discard_page->next;
stat(s, DEACTIVATE_EMPTY);
discard_slab(s, page);
* Put a page that was just frozen (in __slab_free) into a partial page
* slot if available. This is done without interrupts disabled and without
* preemption disabled. The cmpxchg is racy and may put the partial page
* onto a random cpu's partial slot.
* If we did not find a slot then simply move all the partials to the
* per node partial list.
int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
struct page *oldpage;
oldpage = this_cpu_read(s->cpu_slab->partial);
pobjects = oldpage->pobjects;
pages = oldpage->pages;
if (drain && pobjects > s->cpu_partial) {
unsigned long flags;
* partial array is full. Move the existing
* set to the per node partial list.
local_irq_save(flags);
unfreeze_partials(s);
local_irq_restore(flags);
pobjects += page->objects - page->inuse;
page->pages = pages;
page->pobjects = pobjects;
page->next = oldpage;
} while (irqsafe_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
stat(s, CPU_PARTIAL_FREE);
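The loop above is a lock-free push onto the per-cpu partial list: each page being pushed records the running totals (pages, pobjects) for the list it now heads, so reading the head alone tells how full the stock is, and the cmpxchg simply retries if another push raced. A user-space C11 sketch of the same pattern with invented names and plain atomics in place of irqsafe_cpu_cmpxchg():

/* User-space sketch of the put_cpu_partial() push: each newly pushed node
 * carries cumulative totals for the list it now heads. Names are invented.
 */
#include <stdatomic.h>
#include <stdio.h>

struct partial_page {
    struct partial_page *next;
    int free_objects;  /* free objects in this page */
    int pages;         /* cumulative pages from here downwards */
    int pobjects;      /* cumulative free objects from here downwards */
};

static _Atomic(struct partial_page *) partial_head;

static void push_partial(struct partial_page *page)
{
    struct partial_page *old = atomic_load(&partial_head);

    do {
        int pages = old ? old->pages : 0;
        int pobjects = old ? old->pobjects : 0;

        page->pages = pages + 1;
        page->pobjects = pobjects + page->free_objects;
        page->next = old;
        /* Racy by design: another thread may push concurrently,
         * in which case we recompute the totals and retry. */
    } while (!atomic_compare_exchange_weak(&partial_head, &old, page));
}

int main(void)
{
    struct partial_page a = { .free_objects = 3 };
    struct partial_page b = { .free_objects = 5 };

    push_partial(&a);
    push_partial(&b);
    printf("head: %d pages, %d pooled objects\n",
           atomic_load(&partial_head)->pages,
           atomic_load(&partial_head)->pobjects);
    return 0;
}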
static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
stat(s, CPUSLAB_FLUSH);
deactivate_slab(s, c);
c = this_cpu_ptr(s->cpu_slab);
/* We handle __GFP_ZERO in the caller */
gfpflags &= ~__GFP_ZERO;
if (unlikely(!node_match(c, node)))
if (unlikely(!node_match(c, node))) {
stat(s, ALLOC_NODE_MISMATCH);
deactivate_slab(s, c);
/* must check again c->freelist in case of cpu migration or IRQ */
object = c->freelist;
stat(s, ALLOC_SLOWPATH);
object = c->page->freelist;
counters = c->page->counters;
new.counters = counters;
VM_BUG_ON(!new.frozen);
* If there is no object left then we use this loop to
* deactivate the slab which is simple since no objects
* are left in the slab and therefore we do not need to
* put the page back onto the partial list.
* If there are objects left then we retrieve them
* and use them to refill the per cpu queue.
new.inuse = c->page->objects;
new.frozen = object != NULL;
} while (!__cmpxchg_double_slab(s, c->page,
stat(s, DEACTIVATE_BYPASS);
stat(s, ALLOC_REFILL);
object = page->freelist;
if (unlikely(!object))
if (kmem_cache_debug(s))
c->freelist = get_freepointer(s, object);
page->inuse = page->objects;
page->freelist = NULL;
c->tid = next_tid(c->tid);
local_irq_restore(flags);
stat(s, ALLOC_SLOWPATH);
deactivate_slab(s, c);
page = get_partial(s, gfpflags, node);
stat(s, ALLOC_FROM_PARTIAL);
c->node = page_to_nid(page);
gfpflags &= gfp_allowed_mask;
if (gfpflags & __GFP_WAIT)
page = new_slab(s, gfpflags, node);
if (gfpflags & __GFP_WAIT)
local_irq_disable();
c = __this_cpu_ptr(s->cpu_slab);
stat(s, ALLOC_SLAB);
__SetPageSlubFrozen(page);
c->node = page_to_nid(page);
if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
slab_out_of_memory(s, gfpflags, node);
local_irq_restore(flags);
if (!alloc_debug_processing(s, page, object, addr))
page->freelist = get_freepointer(s, object);

c->page = c->partial;
c->partial = c->page->next;
c->node = page_to_nid(c->page);
stat(s, CPU_PARTIAL_ALLOC);
/* Then do expensive stuff like retrieving pages from the partial lists */
object = get_partial(s, gfpflags, node, c);
if (unlikely(!object)) {
object = new_slab_objects(s, gfpflags, node, &c);
if (unlikely(!object)) {
if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
slab_out_of_memory(s, gfpflags, node);
local_irq_restore(flags);
if (likely(!kmem_cache_debug(s)))
/* Only entered in the debug case */
if (!alloc_debug_processing(s, c->page, object, addr))
goto new_slab; /* Slab failed checks. Next slab needed */
c->freelist = get_freepointer(s, object);
deactivate_slab(s, c);
c->node = NUMA_NO_NODE;
local_irq_restore(flags);
void **object = (void *)x;
unsigned long flags;
unsigned long counters;
struct kmem_cache_node *n = NULL;
unsigned long uninitialized_var(flags);
local_irq_save(flags);
stat(s, FREE_SLOWPATH);
if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr))
prior = page->freelist;
set_freepointer(s, object, prior);
page->freelist = object;
if (unlikely(PageSlubFrozen(page))) {
prior = page->freelist;
counters = page->counters;
set_freepointer(s, object, prior);
new.counters = counters;
was_frozen = new.frozen;
if ((!new.inuse || !prior) && !was_frozen && !n) {
if (!kmem_cache_debug(s) && !prior)
* Slab was on no list before and will be partially empty
* We can defer the list move and instead freeze it.
else { /* Needs to be taken off a list */
n = get_node(s, page_to_nid(page));
* Speculatively acquire the list_lock.
* If the cmpxchg does not succeed then we may
* drop the list_lock without any processing.
* Otherwise the list_lock will synchronize with
* other processors updating the list of slabs.
spin_lock_irqsave(&n->list_lock, flags);
} while (!cmpxchg_double_slab(s, page,
object, new.counters,
* If we just froze the page then put it onto the
* per cpu partial list.
if (new.frozen && !was_frozen)
put_cpu_partial(s, page, 1);
* The list lock was not taken therefore no list
* activity can be necessary.
stat(s, FREE_FROZEN);
* was_frozen may have been set after we acquired the list_lock in
* an earlier loop. So we need to check it here again.
stat(s, FREE_FROZEN);
if (unlikely(!page->inuse))
* Objects left in the slab. If it was not on the partial list before
if (unlikely(!prior)) {
add_partial(get_node(s, page_to_nid(page)), page, 1);
stat(s, FREE_ADD_PARTIAL);
local_irq_restore(flags);

if (unlikely(!inuse && n->nr_partial > s->min_partial))
* Objects left in the slab. If it was not on the partial list before
if (unlikely(!prior)) {
remove_full(s, page);
add_partial(n, page, DEACTIVATE_TO_TAIL);
stat(s, FREE_ADD_PARTIAL);
spin_unlock_irqrestore(&n->list_lock, flags);
* Slab still on the partial list.
* Slab on the partial list.
remove_partial(s, page);
remove_partial(n, page);
stat(s, FREE_REMOVE_PARTIAL);
local_irq_restore(flags);
/* Slab must be on the full list */
remove_full(s, page);
spin_unlock_irqrestore(&n->list_lock, flags);
stat(s, FREE_SLAB);
discard_slab(s, page);
#ifdef CONFIG_CMPXCHG_DOUBLE
if (system_has_cmpxchg_double() && (s->flags & SLAB_DEBUG_FLAGS) == 0)
/* Enable fast mode */
s->flags |= __CMPXCHG_DOUBLE;

* The larger the object size is, the more pages we want on the partial
* list to avoid pounding the page allocator excessively.
set_min_partial(s, ilog2(s->size));
set_min_partial(s, ilog2(s->size) / 2);

* cpu_partial determines the maximum number of objects kept in the
* per cpu partial lists of a processor.
* Per cpu partial lists mainly contain slabs that just have one
* object freed. If they are used for allocation then they can be
* filled up again with minimal effort. The slab will never hit the
* per node partial lists and therefore no locking will be required.
* This setting also determines
* A) The number of objects from per cpu partial slabs dumped to the
* per node list when we reach the limit.
* B) The number of objects in cpu partial slabs to extract from the
* per node list when we run out of per cpu objects. We only fetch 50%
* to keep some capacity around for frees.
if (s->size >= PAGE_SIZE)
else if (s->size >= 1024)
else if (s->size >= 256)
s->cpu_partial = 13;
else
s->cpu_partial = 30;
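Only the assignments for the two smaller size classes (13 and 30) are visible in this hunk; the values chosen for objects of at least 1024 bytes or a page are not shown here. The stand-alone sketch below therefore uses assumed placeholder values (2 and 6) for those branches, purely to make the shape of the heuristic concrete: the larger the object, the shallower the per-cpu partial stock.

/* Stand-alone model of the cpu_partial sizing ladder. The values for the
 * two largest size classes are NOT shown in this excerpt; 2 and 6 below
 * are assumed placeholders, only 13 and 30 appear in the hunk above.
 */
#include <stdio.h>

#define MODEL_PAGE_SIZE 4096UL   /* assumption: a typical PAGE_SIZE */

static unsigned int cpu_partial_for_size(unsigned long size)
{
    if (size >= MODEL_PAGE_SIZE)
        return 2;        /* assumed placeholder */
    else if (size >= 1024)
        return 6;        /* assumed placeholder */
    else if (size >= 256)
        return 13;       /* from the hunk above */
    else
        return 30;       /* from the hunk above */
}

int main(void)
{
    unsigned long sizes[] = { 32, 192, 512, 2048, 8192 };

    for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
        printf("object size %5lu -> cpu_partial %u\n",
               sizes[i], cpu_partial_for_size(sizes[i]));
    return 0;
}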
s->refcount = 1;
#ifdef CONFIG_NUMA
s->remote_node_defrag_ratio = 1000;
* list_lock. page->inuse here is the upper limit.
list_for_each_entry_safe(page, t, &n->partial, lru) {
if (!page->inuse && slab_trylock(page)) {
* Must hold slab lock here because slab_free
* may have freed the last object and be
* waiting to release the slab.
__remove_partial(n, page);
discard_slab(s, page);
list_move(&page->lru,
slabs_by_inuse + page->inuse);
list_move(&page->lru, slabs_by_inuse + page->inuse);

* Rebuild the partial list with the slabs filled up most
* first and the least used slabs at the end.
for (i = objects - 1; i >= 0; i--)
for (i = objects - 1; i > 0; i--)
list_splice(slabs_by_inuse + i, n->partial.prev);
spin_unlock_irqrestore(&n->list_lock, flags);

/* Release empty slabs */
list_for_each_entry_safe(page, t, slabs_by_inuse, lru)
discard_slab(s, page);
kfree(slabs_by_inuse);
SLAB_ATTR_RO(objects_partial);

static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
for_each_online_cpu(cpu) {
struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
pages += page->pages;
objects += page->pobjects;
len = sprintf(buf, "%d(%d)", objects, pages);
for_each_online_cpu(cpu) {
struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
if (page && len < PAGE_SIZE - 20)
len += sprintf(buf + len, " C%d=%d(%d)", cpu,
page->pobjects, page->pages);
return len + sprintf(buf + len, "\n");
SLAB_ATTR_RO(slabs_cpu_partial);
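slabs_cpu_partial is exported read-only through sysfs, so the per-cpu partial statistics produced by the function above can be read from user space. A minimal reader follows; the cache name in the path is only an example, any directory under /sys/kernel/slab/ will do.

/* Print the per-cpu partial statistics exposed by the attribute above.
 * The cache name is an example; adjust the path for the cache of interest.
 */
#include <stdio.h>

int main(void)
{
    const char *path = "/sys/kernel/slab/kmalloc-256/slabs_cpu_partial";
    char line[4096];
    FILE *f = fopen(path, "r");

    if (!f) {
        perror(path);
        return 1;
    }
    /* Format produced above: "<objects>(<pages>) C0=<objects>(<pages>) ..." */
    if (fgets(line, sizeof(line), f))
        fputs(line, stdout);
    fclose(f);
    return 0;
}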
static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));