/*
 *  bootmem - A boot-time physical memory allocator and configurator
 *
 *  Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
#include <linux/memblock.h>

#include <asm/processor.h>

#include "internal.h"
#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
					u64 goal, u64 limit)
{
	void *ptr;
	u64 addr;

	if (limit > memblock.current_limit)
		limit = memblock.current_limit;

	addr = find_memory_core_early(nid, size, align, goal, limit);

	if (addr == MEMBLOCK_ERROR)
		return NULL;

	ptr = phys_to_virt(addr);
	memset(ptr, 0, size);
	memblock_x86_reserve_range(addr, addr + size, "BOOTMEM");
	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks.
	 */
	kmemleak_alloc(ptr, size, 0, 0);

	return ptr;
}
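
/*
 * Illustrative note (not part of the original file): this helper is the
 * common backend for every allocator entry point below.  A request for one
 * zeroed, cache-aligned page anywhere on any node would effectively be:
 *
 *	ptr = __alloc_memory_core_early(MAX_NUMNODES, PAGE_SIZE,
 *					SMP_CACHE_BYTES, 0, -1ULL);
 *
 * A goal of 0 means "no preferred address"; a limit of -1ULL means "no
 * upper bound" (it is clamped to memblock.current_limit above).
 */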
/**
 * free_bootmem_late - free bootmem pages directly to page allocator
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator, no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long addr, unsigned long size)
{
	unsigned long cursor, end;

	kmemleak_free_part(__va(addr), size);

	cursor = PFN_UP(addr);
	end = PFN_DOWN(addr + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), 0);
		totalram_pages++;
	}
}
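
/*
 * Illustrative note (not part of the original file): PFN_UP/PFN_DOWN round
 * the range inward, so only pages fully inside [addr, addr + size) are
 * freed.  With 4 KiB pages, free_bootmem_late(0x1100, 0x2000) spans
 * 0x1100-0x30ff but frees only the one whole page at 0x2000-0x2fff.
 */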
static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	int i;
	unsigned long start_aligned, end_aligned;
	int order = ilog2(BITS_PER_LONG);

	start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
	end_aligned = end & ~(BITS_PER_LONG - 1);

	if (end_aligned <= start_aligned) {
		for (i = start; i < end; i++)
			__free_pages_bootmem(pfn_to_page(i), 0);

		return;
	}

	for (i = start; i < start_aligned; i++)
		__free_pages_bootmem(pfn_to_page(i), 0);

	for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG)
		__free_pages_bootmem(pfn_to_page(i), order);

	for (i = end_aligned; i < end; i++)
		__free_pages_bootmem(pfn_to_page(i), 0);
}
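
/*
 * Worked example (illustrative, not part of the original file): with
 * BITS_PER_LONG == 64, order = ilog2(64) = 6, so the middle loop hands
 * 64-page blocks to the buddy allocator.  For start = 100, end = 300:
 *
 *	start_aligned = (100 + 63) & ~63 = 128
 *	end_aligned   = 300 & ~63        = 256
 *
 * Pages 100-127 and 256-299 are freed one at a time; pages 128-255 are
 * freed as two order-6 blocks, far cheaper than 128 single-page frees.
 */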
unsigned long __init free_all_memory_core_early(int nodeid)
{
	int i;
	u64 start, end;
	unsigned long count = 0;
	struct range *range = NULL;
	int nr_range;

	nr_range = get_free_all_memory_range(&range, nodeid);

	for (i = 0; i < nr_range; i++) {
		start = range[i].start;
		end = range[i].end;
		count += end - start;
		__free_pages_memory(start, end);
	}

	return count;
}
/**
 * free_all_bootmem_node - release a node's free pages to the buddy allocator
 * @pgdat: node to be released
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
	register_page_bootmem_info_node(pgdat);

	/* free_all_memory_core_early(MAX_NUMNODES) will be called later */
	return 0;
}
/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
	/*
	 * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id
	 * because in some cases (e.g. when Node0 has no RAM installed)
	 * low ram will be on Node1.
	 * Using MAX_NUMNODES makes sure all ranges in early_node_map[]
	 * are used, instead of only the Node0-related ones.
	 */
	return free_all_memory_core_early(MAX_NUMNODES);
}
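
/*
 * Illustrative note (not part of the original file): the return value is a
 * count of pages handed to the buddy allocator; a typical arch mem_init()
 * folds it into the global accounting along the lines of:
 *
 *	totalram_pages += free_all_bootmem();
 */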
/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			      unsigned long size)
{
	kmemleak_free_part(__va(physaddr), size);
	memblock_x86_free_range(physaddr, physaddr + size);
}
/**
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
	kmemleak_free_part(__va(addr), size);
	memblock_x86_free_range(addr, addr + size);
}
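
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * returns a range it reserved earlier during boot, for example an arch
 * releasing its initrd image once it has been copied or unpacked:
 *
 *	free_bootmem(__pa(initrd_start), initrd_end - initrd_start);
 *
 * initrd_start/initrd_end are real kernel symbols, but this call is only
 * an example of the pattern, not code from this file.
 */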
static void * __init ___alloc_bootmem_nopanic(unsigned long size,
					unsigned long align,
					unsigned long goal,
					unsigned long limit)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc(size, GFP_NOWAIT);

restart:
	ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit);
	if (ptr)
		return ptr;

	/* Retry once with the preferred address dropped. */
	if (goal != 0) {
		goal = 0;
		goto restart;
	}

	return NULL;
}
/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
					unsigned long goal)
{
	unsigned long limit = -1UL;

	return ___alloc_bootmem_nopanic(size, align, goal, limit);
}
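
/*
 * Illustrative sketch (not part of the original file): callers that can
 * tolerate failure use the _nopanic variant and check the result:
 *
 *	void *buf = __alloc_bootmem_nopanic(PAGE_SIZE, SMP_CACHE_BYTES, 0);
 *	if (!buf)
 *		pr_warn("optional early buffer unavailable\n");
 *
 * "buf" is a hypothetical name used only for this example.
 */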
static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

	if (mem)
		return mem;
	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}
/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
			      unsigned long goal)
{
	unsigned long limit = -1UL;

	return ___alloc_bootmem(size, align, goal, limit);
}
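
/*
 * Illustrative sketch (not part of the original file): early boot code
 * typically allocates small, zeroed control structures this way.  With a
 * hypothetical "struct my_early_state":
 *
 *	struct my_early_state *s;
 *
 *	s = __alloc_bootmem(sizeof(*s), SMP_CACHE_BYTES, 0);
 *
 * No NULL check is needed: on failure this variant panics.
 */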
/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
					goal, -1ULL);
	if (ptr)
		return ptr;

	return __alloc_memory_core_early(MAX_NUMNODES, size, align,
					 goal, -1ULL);
}
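
/*
 * Illustrative sketch (not part of the original file): NUMA-aware early
 * code uses this to place per-node data on (or near) its own node:
 *
 *	void *map = __alloc_bootmem_node(NODE_DATA(nid), map_size,
 *					 SMP_CACHE_BYTES, 0);
 *
 * "map", "nid" and "map_size" are hypothetical names for this example.
 */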
void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
					unsigned long align, unsigned long goal)
{
#ifdef MAX_DMA32_PFN
	unsigned long end_pfn;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	/* update goal according to MAX_DMA32_PFN */
	end_pfn = pgdat->node_start_pfn + pgdat->node_spanned_pages;

	/*
	 * Only redirect the goal above the DMA32 boundary when the node
	 * extends at least 128 MiB (expressed in pages) past MAX_DMA32_PFN.
	 */
	if (end_pfn > MAX_DMA32_PFN + (128UL << (20 - PAGE_SHIFT)) &&
	    (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
		void *ptr;
		unsigned long new_goal;

		new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
		ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
						new_goal, -1ULL);
		if (ptr)
			return ptr;
	}
#endif

	return __alloc_bootmem_node(pgdat, size, align, goal);
}
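
/*
 * Worked note (illustrative, not part of the original file): with 4 KiB
 * pages (PAGE_SHIFT == 12), 128 MiB is 128 << (20 - 12) == 32768 pages.
 * The check above therefore redirects the goal past the DMA32 boundary
 * only when the node extends well beyond MAX_DMA32_PFN, preserving
 * memory below 4 GiB for DMA32-limited devices.
 */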
#ifdef CONFIG_SPARSEMEM
/**
 * alloc_bootmem_section - allocate boot memory from a specific section
 * @size: size of the request in bytes
 * @section_nr: sparse map section to allocate from
 *
 * Return NULL on failure.
 */
void * __init alloc_bootmem_section(unsigned long size,
				    unsigned long section_nr)
{
	unsigned long pfn, goal, limit;

	pfn = section_nr_to_pfn(section_nr);
	goal = pfn << PAGE_SHIFT;
	limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;

	return __alloc_memory_core_early(early_pfn_to_nid(pfn), size,
					 SMP_CACHE_BYTES, goal, limit);
}
#endif
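
/*
 * Worked note (illustrative, not part of the original file): on x86-64,
 * SECTION_SIZE_BITS == 27 (128 MiB sections), so for section_nr == 1:
 *
 *	pfn   = 1 << (27 - PAGE_SHIFT) = 32768
 *	goal  = 0x08000000
 *	limit = 0x10000000
 *
 * which confines the allocation to that one 128 MiB section.
 */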
void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
					   unsigned long align, unsigned long goal)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
					goal, -1ULL);
	if (ptr)
		return ptr;

	return __alloc_bootmem_nopanic(size, align, goal);
}
#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
#endif
/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
				  unsigned long goal)
{
	return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}
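
/*
 * Illustrative sketch (not part of the original file): "low" means below
 * ARCH_LOW_ADDRESS_LIMIT (4 GiB unless the architecture overrides it),
 * which matters for hardware that can only address 32-bit physical
 * memory:
 *
 *	void *dma_area = __alloc_bootmem_low(PAGE_SIZE, PAGE_SIZE, 0);
 *
 * "dma_area" is a hypothetical name used only for this example.
 */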
/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
				       unsigned long align, unsigned long goal)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
					goal, ARCH_LOW_ADDRESS_LIMIT);
	if (ptr)
		return ptr;

	return __alloc_memory_core_early(MAX_NUMNODES, size, align,
					 goal, ARCH_LOW_ADDRESS_LIMIT);
}