--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -11,12 +11,11 @@
 #include <linux/swapops.h>
 #include <linux/kmemleak.h>
 
-static void __meminit
-__init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
+static void __meminit init_page_cgroup(struct page_cgroup *pc, unsigned long id)
 {
         pc->flags = 0;
+        set_page_cgroup_array_id(pc, id);
         pc->mem_cgroup = NULL;
-        pc->page = pfn_to_page(pfn);
         INIT_LIST_HEAD(&pc->lru);
 }
 static unsigned long total_usage;
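What the first hunk buys: struct page_cgroup loses its back-pointer to struct page (one pointer per page in the system) and instead records the id of the per-section array it lives in, packed into spare bits of pc->flags. The set_page_cgroup_array_id()/page_cgroup_array_id() helpers are introduced elsewhere in this series; here is a minimal userspace model of the encoding, with invented shift and mask values (the real field layout in include/linux/page_cgroup.h differs, and this assumes a 64-bit unsigned long):

#include <assert.h>

#define PCG_ARRAYID_SHIFT 48                  /* invented position of the id field */
#define PCG_ARRAYID_MASK  ((1UL << 16) - 1)   /* invented width */

struct page_cgroup { unsigned long flags; };

static void set_page_cgroup_array_id(struct page_cgroup *pc, unsigned long id)
{
        pc->flags &= ~(PCG_ARRAYID_MASK << PCG_ARRAYID_SHIFT);
        pc->flags |= (id & PCG_ARRAYID_MASK) << PCG_ARRAYID_SHIFT;
}

static unsigned long page_cgroup_array_id(struct page_cgroup *pc)
{
        return (pc->flags >> PCG_ARRAYID_SHIFT) & PCG_ARRAYID_MASK;
}

int main(void)
{
        struct page_cgroup pc = { .flags = 0x7 };  /* pretend low bits are PCG_* state */

        set_page_cgroup_array_id(&pc, 42);
        assert(page_cgroup_array_id(&pc) == 42);
        assert((pc.flags & 0xff) == 0x7);          /* state bits survive the store */
        return 0;
}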
@@ -105,7 +117,15 @@ struct page_cgroup *lookup_page_cgroup(struct page *page)
         return section->page_cgroup + pfn;
 }
 
-/* __alloc_bootmem...() is protected by !slab_available() */
-static int __init_refok init_section_page_cgroup(unsigned long pfn)
-{
-        struct mem_section *section = __pfn_to_section(pfn);
+struct page *lookup_cgroup_page(struct page_cgroup *pc)
+{
+        struct mem_section *section;
+        struct page *page;
+        unsigned long nr;
+
+        nr = page_cgroup_array_id(pc);
+        section = __nr_to_section(nr);
+        page = pfn_to_page(pc - section->page_cgroup);
+        VM_BUG_ON(pc != lookup_page_cgroup(page));
+        return page;
+}
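lookup_cgroup_page() inverts lookup_page_cgroup(): because section->page_cgroup stores base - start_pfn rather than the table address itself (see init_section_page_cgroup() below), a descriptor's distance from that biased base is exactly its pfn. A toy model of the round trip, with a deliberately tiny section:

#include <assert.h>

#define PAGES_PER_SECTION 8            /* toy value; real sections are far larger */

struct page_cgroup { unsigned long flags; };

int main(void)
{
        static struct page_cgroup table[PAGES_PER_SECTION];
        unsigned long start_pfn = 3 * PAGES_PER_SECTION;   /* section-aligned */

        /* what init_section_page_cgroup() stores:
         *     section->page_cgroup = base - pfn;
         * (out-of-bounds pointer arithmetic, exactly as the kernel does it) */
        struct page_cgroup *biased = table - start_pfn;

        unsigned long pfn = start_pfn + 5;
        struct page_cgroup *pc = biased + pfn;     /* lookup_page_cgroup() */
        assert(pc == &table[5]);

        /* lookup_cgroup_page(): recover the pfn from the descriptor alone */
        assert((unsigned long)(pc - biased) == pfn);
        return 0;
}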
@@ -111,0 +132,33 @@
+
+static void *__meminit alloc_page_cgroup(size_t size, int nid)
+{
+        void *addr = NULL;
+
+        addr = alloc_pages_exact_nid(nid, size, GFP_KERNEL | __GFP_NOWARN);
+        if (addr)
+                return addr;
+
+        if (node_state(nid, N_HIGH_MEMORY))
+                addr = vmalloc_node(size, nid);
+        else
+                addr = vmalloc(size);
+
+        return addr;
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+static void free_page_cgroup(void *addr)
+{
+        if (is_vmalloc_addr(addr)) {
+                vfree(addr);
+        } else {
+                struct page *page = virt_to_page(addr);
+                size_t table_size =
+                        sizeof(struct page_cgroup) * PAGES_PER_SECTION;
+
+                BUG_ON(PageReserved(page));
+                free_pages_exact(addr, table_size);
+        }
+}
+#endif
+
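alloc_page_cgroup() tries a physically contiguous, node-local allocation first; __GFP_NOWARN silences the failure warning because falling back to vmalloc is expected for section-sized tables once memory fragments. free_page_cgroup() must undo whichever path won, hence the is_vmalloc_addr() test. Condensed into one hypothetical alloc/free pair (table_alloc/table_free are illustrative names, and the plain-vmalloc() fallback for nodes without mappable memory is elided):

static void *table_alloc(size_t size, int nid)
{
        void *addr = alloc_pages_exact_nid(nid, size, GFP_KERNEL | __GFP_NOWARN);

        if (!addr)
                addr = vmalloc_node(size, nid);   /* may still fail; caller checks */
        return addr;
}

static void table_free(void *addr, size_t size)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);                      /* matches vmalloc_node() */
        else
                free_pages_exact(addr, size);     /* matches alloc_pages_exact_nid() */
}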
@@ -112,49 +165,40 @@
+static int __meminit init_section_page_cgroup(unsigned long pfn, int nid)
+{
         struct page_cgroup *base, *pc;
+        struct mem_section *section;
         unsigned long table_size;
-        int nid, index;
+        unsigned long nr;
+        int index;
 
-        if (!section->page_cgroup) {
-                nid = page_to_nid(pfn_to_page(pfn));
-                table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
-                VM_BUG_ON(!slab_is_available());
-                if (node_state(nid, N_HIGH_MEMORY)) {
-                        base = kmalloc_node(table_size,
-                                        GFP_KERNEL | __GFP_NOWARN, nid);
-                        if (!base)
-                                base = vmalloc_node(table_size, nid);
-                } else {
-                        base = kmalloc(table_size, GFP_KERNEL | __GFP_NOWARN);
-                        if (!base)
-                                base = vmalloc(table_size);
-                }
-                /*
-                 * The value stored in section->page_cgroup is (base - pfn)
-                 * and it does not point to the memory block allocated above,
-                 * causing kmemleak false positives.
-                 */
-                kmemleak_not_leak(base);
-        } else {
-                /*
-                 * We don't have to allocate page_cgroup again, but
-                 * address of memmap may be changed. So, we have to initialize
-                 * again.
-                 */
-                base = section->page_cgroup + pfn;
-                table_size = 0;
-                /* check address of memmap is changed or not. */
-                if (base->page == pfn_to_page(pfn))
-                        goto out;
-        }
+        nr = pfn_to_section_nr(pfn);
+        section = __nr_to_section(nr);
+
+        if (section->page_cgroup)
+                return 0;
+
+        table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
+        base = alloc_page_cgroup(table_size, nid);
+
+        /*
+         * The value stored in section->page_cgroup is (base - pfn)
+         * and it does not point to the memory block allocated above,
+         * causing kmemleak false positives.
+         */
+        kmemleak_not_leak(base);
 
         if (!base) {
                 printk(KERN_ERR "page cgroup allocation failure\n");
                 return -ENOMEM;
         }
 
         for (index = 0; index < PAGES_PER_SECTION; index++) {
                 pc = base + index;
-                __init_page_cgroup(pc, pfn + index);
+                init_page_cgroup(pc, nr);
         }
-out:
+        /*
+         * The passed "pfn" may not be aligned to SECTION.  For the calculation
+         * we need to apply a mask.
+         */
+        pfn &= PAGE_SECTION_MASK;
         section->page_cgroup = base - pfn;
         total_usage += table_size;
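Note the new masking step at the end. The old code only ever saw section-aligned pfns; the new boot path below starts from node_start_pfn(), which need not be aligned. Since the stored value is base - pfn, an unaligned pfn would skew every subsequent lookup by pfn % PAGES_PER_SECTION slots:

#include <assert.h>

#define PAGES_PER_SECTION 8                         /* toy value */
#define PAGE_SECTION_MASK (~((unsigned long)PAGES_PER_SECTION - 1))

int main(void)
{
        unsigned long pfn = 27;                     /* unaligned caller pfn */

        assert((pfn & PAGE_SECTION_MASK) == 24);    /* first pfn of the section */
        /* Storing base - 27 instead of base - 24 would make every later
         * lookup_page_cgroup() land three descriptors past the right slot. */
        return 0;
}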
@@ -192,7 +228,17 @@ static int __meminit online_page_cgroup(unsigned long start_pfn,
         start = start_pfn & ~(PAGES_PER_SECTION - 1);
         end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);
 
+        if (nid == -1) {
+                /*
+                 * In this case, "nid" already exists and contains valid memory.
+                 * "start_pfn" passed to us is a pfn which is an arg for
+                 * online__pages(), and start_pfn should exist.
+                 */
+                nid = pfn_to_nid(start_pfn);
+                VM_BUG_ON(!node_state(nid, N_ONLINE));
+        }
+
         for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
                 if (!pfn_present(pfn))
                         continue;
-                fail = init_section_page_cgroup(pfn);
+                fail = init_section_page_cgroup(pfn, nid);
         }
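The nid handled here comes from the hotplug notifier: memory_notify's status_change_nid is -1 when onlining does not bring a whole node up, which is the case the new if (nid == -1) branch covers by deriving the node from start_pfn. The caller is page_cgroup_callback(), registered below but not part of this excerpt; a sketch of how it plausibly passes the id through (not the verbatim callback):

static int __meminit page_cgroup_callback(struct notifier_block *self,
                                          unsigned long action, void *arg)
{
        struct memory_notify *mn = arg;
        int ret = 0;

        switch (action) {
        case MEM_GOING_ONLINE:
                /* status_change_nid is -1 if the node was already online */
                ret = online_page_cgroup(mn->start_pfn, mn->nr_pages,
                                         mn->status_change_nid);
                break;
        default:
                break;
        }
        return notifier_from_errno(ret);
}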
@@ -256,25 +297,47 @@
 void __init page_cgroup_init(void)
 {
         unsigned long pfn;
-        int fail = 0;
+        int nid;
 
         if (mem_cgroup_disabled())
                 return;
 
-        for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) {
-                if (!pfn_present(pfn))
-                        continue;
-                fail = init_section_page_cgroup(pfn);
-        }
-        if (fail) {
-                printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");
-                panic("Out of memory");
-        } else {
-                hotplug_memory_notifier(page_cgroup_callback, 0);
-        }
+        for_each_node_state(nid, N_HIGH_MEMORY) {
+                unsigned long start_pfn, end_pfn;
+
+                start_pfn = node_start_pfn(nid);
+                end_pfn = node_end_pfn(nid);
+                /*
+                 * start_pfn and end_pfn may not be aligned to SECTION and the
+                 * page->flags of out of node pages are not initialized.  So we
+                 * scan [start_pfn, the biggest section's pfn < end_pfn) here.
+                 */
+                for (pfn = start_pfn;
+                     pfn < end_pfn;
+                     pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {
+
+                        if (!pfn_present(pfn))
+                                continue;
+                        /*
+                         * Nodes's pfns can be overlapping.
+                         * We know some arch can have a nodes layout such as
+                         * -------------pfn-------------->
+                         * N0 | N1 | N2 | N0 | N1 | N2|....
+                         */
+                        if (pfn_to_nid(pfn) != nid)
+                                continue;
+                        if (init_section_page_cgroup(pfn, nid))
+                                goto oom;
+                }
+        }
+        hotplug_memory_notifier(page_cgroup_callback, 0);
         printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
-        printk(KERN_INFO "please try 'cgroup_disable=memory' option if you don't"
-        " want memory cgroups\n");
+        printk(KERN_INFO "please try 'cgroup_disable=memory' option if you "
+                "don't want memory cgroups\n");
+        return;
+oom:
+        printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");
+        panic("Out of memory");
 }
 
 void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
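The pfn_to_nid(pfn) != nid test in the loop above exists because node pfn ranges can interleave, as the N0 | N1 | N2 | N0 ... comment illustrates: a section that falls inside node 0's [node_start_pfn(), node_end_pfn()) span may actually belong to node 1, and its page_cgroup table should be allocated from node 1's memory when node 1 is scanned. A toy model of how a naive per-node scan visits foreign sections:

#include <assert.h>

#define PAGES_PER_SECTION 8                 /* toy value */

/* hypothetical interleaved layout: even sections node 0, odd sections node 1 */
static int pfn_to_nid(unsigned long pfn)
{
        return (pfn / PAGES_PER_SECTION) & 1;
}

int main(void)
{
        /* node 0's span is [0, 32) even though sections 1 and 3 are node 1's */
        unsigned long pfn;
        int foreign = 0;

        for (pfn = 0; pfn < 32; pfn += PAGES_PER_SECTION)
                if (pfn_to_nid(pfn) != 0)
                        foreign++;
        assert(foreign == 2);               /* skipped here; handled on node 1's pass */
        return 0;
}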