extern unsigned long find_max_low_pfn(void);
extern unsigned long highend_pfn, highstart_pfn;

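/*
 * Descriptive note (added for clarity): one page table's worth of 4k
 * pages, i.e. the size of a PMD-level large page -- 4MB without PAE
 * (PTRS_PER_PTE == 1024) or 2MB with PAE (PTRS_PER_PTE == 512).
 */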
#define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)

unsigned long node_remap_size[MAX_NUMNODES];
static void *node_remap_start_vaddr[MAX_NUMNODES];
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);

static unsigned long kva_start_pfn;
static unsigned long kva_pages;

/*
 * FLAT - support for basic PC memory model with discontig enabled, essentially
 *        a single node with all available processors in it with a flat
 *        memory layout.
 */
int __init get_memcfg_numa_flat(void)
{
	printk(KERN_DEBUG "NUMA - single node, flat memory mode\n");

	node_start_pfn[0] = 0;
	node_end_pfn[0] = max_pfn;
	memblock_x86_register_active_regions(0, 0, max_pfn);
	memory_present(0, 0, max_pfn);
	node_remap_size[0] = node_memmap_size_bytes(0, 0, max_pfn);

	/* Indicate there is one node available. */
	nodes_clear(node_online_map);
	node_set_online(0);
	return 1;
}

/*
 * Find the highest page frame number we have available for the node
 */
static void __init propagate_e820_map_node(int nid)
{
	if (node_end_pfn[nid] > max_pfn)
		node_end_pfn[nid] = max_pfn;
	/*
	 * if a user has given mem=XXXX, then we need to make sure
	 * that the node _starts_ before that, too, not just ends
	 */
	if (node_start_pfn[nid] > max_pfn)
		node_start_pfn[nid] = max_pfn;
	BUG_ON(node_start_pfn[nid] > node_end_pfn[nid]);
}

/*
 * Allocate memory for the pg_data_t for this node via a crude pre-bootmem
 * method.  For node zero take this from the bottom of memory, for
 * subsequent nodes place them at node_remap_start_vaddr which contains
 * node local data in physically node local memory.  See setup_memory()
 * for details.
 */
static void __init allocate_pgdat(int nid)
{
	char buf[16];

	if (node_has_online_mem(nid) && node_remap_start_vaddr[nid])
		NODE_DATA(nid) = (pg_data_t *)node_remap_start_vaddr[nid];
	else {
		unsigned long pgdat_phys;
		pgdat_phys = memblock_find_in_range(min_low_pfn<<PAGE_SHIFT,
				 max_pfn_mapped<<PAGE_SHIFT,
				 sizeof(pg_data_t),
				 PAGE_SIZE);
		NODE_DATA(nid) = (pg_data_t *)(pfn_to_kaddr(pgdat_phys>>PAGE_SHIFT));
		memset(buf, 0, sizeof(buf));
		sprintf(buf, "NODE_DATA %d", nid);
		memblock_x86_reserve_range(pgdat_phys, pgdat_phys + sizeof(pg_data_t), buf);
	}
	printk(KERN_DEBUG "allocate_pgdat: node %d NODE_DATA %08lx\n",
	       nid, (unsigned long)NODE_DATA(nid));
}

/*
 * Remap memory allocator
 *
 * In the DISCONTIGMEM and SPARSEMEM memory model, a portion of the kernel
 * virtual address space (KVA) is reserved and portions of nodes are mapped
 * using it. This is to allow node-local memory to be allocated for
 * structures that would normally require ZONE_NORMAL. The memory is
 * allocated with alloc_remap() and callers should be prepared to allocate
 * from the bootmem allocator instead.
 */
static unsigned long node_remap_start_pfn[MAX_NUMNODES];
static void *node_remap_end_vaddr[MAX_NUMNODES];
static void *node_remap_alloc_vaddr[MAX_NUMNODES];
static unsigned long node_remap_offset[MAX_NUMNODES];
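
/*
 * Descriptive note (added for clarity): together these hold each node's
 * remap window state -- the physical start pfn of the node-local area,
 * the end of the virtual window, and the allocation cursor that
 * alloc_remap() advances.
 */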

/**
 * alloc_remap - Allocate remapped memory
 * @nid: NUMA node to allocate memory from
 * @size: The size of allocation
 *
 * Allocate @size bytes from the remap area of NUMA node @nid.  The
 * size of the remap area is predetermined by init_alloc_remap() and
 * only the callers considered there should call this function.  For
 * more info, please read the comment on top of init_alloc_remap().
 *
 * The caller must be ready to handle allocation failure from this
 * function and fall back to regular memory allocator in such cases.
 *
 * CONTEXT:
 * Single CPU early boot context.
 *
 * RETURNS:
 * Pointer to the allocated memory on success, %NULL on failure.
 */
void *alloc_remap(int nid, unsigned long size)
{
	void *allocation = node_remap_alloc_vaddr[nid];

	size = ALIGN(size, L1_CACHE_BYTES);

	if (!allocation || (allocation + size) > node_remap_end_vaddr[nid])
		return NULL;

	node_remap_alloc_vaddr[nid] += size;
	memset(allocation, 0, size);

	return allocation;
}
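
/*
 * Illustrative usage sketch (not part of the original file): per the
 * contract documented above, callers try alloc_remap() first and fall
 * back to the regular early allocator when it returns NULL.  The helper
 * name example_alloc_node_mem() is hypothetical.
 */
static void * __init example_alloc_node_mem(int nid, unsigned long size)
{
	void *ptr = alloc_remap(nid, size);	/* node-local remap area */

	if (ptr)
		return ptr;
	/* remap area missing or exhausted: fall back to bootmem */
	return alloc_bootmem_node(NODE_DATA(nid), size);
}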

static __init unsigned long calculate_numa_remap_pages(void)
{
	int nid;
	unsigned long size, reserve_pages = 0;

	for_each_online_node(nid) {
		u64 node_kva_target;
		u64 node_kva_final;

		/*
		 * The acpi/srat node info can show hot-add memory zones
		 * where memory could be added but not currently present.
		 */
		printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n",
		       nid, node_start_pfn[nid], node_end_pfn[nid]);
		if (node_start_pfn[nid] > max_pfn)
			continue;
		if (!node_end_pfn[nid])
			continue;
		if (node_end_pfn[nid] > max_pfn)
			node_end_pfn[nid] = max_pfn;

		/* ensure the remap includes space for the pgdat. */
		size = node_remap_size[nid] + sizeof(pg_data_t);

		/* convert size to large (pmd size) pages, rounding up */
		size = (size + LARGE_PAGE_BYTES - 1) / LARGE_PAGE_BYTES;
		/* now the roundup is correct, convert to PAGE_SIZE pages */
		size = size * PTRS_PER_PTE;
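		/*
		 * Worked example (illustrative, assuming PAE so that
		 * PTRS_PER_PTE == 512 and LARGE_PAGE_BYTES == 2MB): a 3MB
		 * request rounds up to 2 large pages, i.e. 1024 4k pages.
		 */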

		node_kva_target = round_down(node_end_pfn[nid] - size,
					     PTRS_PER_PTE);
		node_kva_target <<= PAGE_SHIFT;
		do {
			node_kva_final = memblock_find_in_range(node_kva_target,
					((u64)node_end_pfn[nid])<<PAGE_SHIFT,
					((u64)size)<<PAGE_SHIFT,
					LARGE_PAGE_BYTES);
			node_kva_target -= LARGE_PAGE_BYTES;
		} while (node_kva_final == MEMBLOCK_ERROR &&
			 (node_kva_target>>PAGE_SHIFT) > (node_start_pfn[nid]));

		if (node_kva_final == MEMBLOCK_ERROR)
			panic("Can not get kva ram\n");

		node_remap_size[nid] = size;
		node_remap_offset[nid] = reserve_pages;
		reserve_pages += size;
		printk(KERN_DEBUG "Reserving %ld pages of KVA for lmem_map of"
		       " node %d at %llx\n",
		       size, nid, node_kva_final>>PAGE_SHIFT);

		/*
		 * Keep the KVA area above max_low_pfn so it can still be
		 * used on systems with less memory later.
		 * The layout will be: KVA address, then KVA RAM.
		 *
		 * We are supposed to record only the ranges below
		 * max_low_pfn, but there may be holes in high memory, and
		 * the free-page check only looks at page_is_ram(pfn) &&
		 * !page_is_reserved_early(pfn).  So reserve the range with
		 * memblock_x86_reserve_range() here and hope we don't run
		 * out of that array.
		 */
		memblock_x86_reserve_range(node_kva_final,
					   node_kva_final+(((u64)size)<<PAGE_SHIFT),
					   "KVA RAM");

		node_remap_start_pfn[nid] = node_kva_final>>PAGE_SHIFT;
	}
	printk(KERN_INFO "Reserving total of %lx pages for numa KVA remap\n",
	       reserve_pages);
	return reserve_pages;
}

static void init_remap_allocator(int nid)
{
	node_remap_start_vaddr[nid] = pfn_to_kaddr(
			kva_start_pfn + node_remap_offset[nid]);
	node_remap_end_vaddr[nid] = node_remap_start_vaddr[nid] +
		(node_remap_size[nid] * PAGE_SIZE);
	node_remap_alloc_vaddr[nid] = node_remap_start_vaddr[nid] +
		ALIGN(sizeof(pg_data_t), PAGE_SIZE);

	printk(KERN_DEBUG "node %d will remap to vaddr %08lx - %08lx\n", nid,
	       (ulong) node_remap_start_vaddr[nid],
	       (ulong) node_remap_end_vaddr[nid]);
}
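
/*
 * Illustrative sketch (assumption, not shown in this excerpt): in the
 * pre-refactor version of this file, initmem_init() below sets up the
 * remap window and the pgdat for each online node roughly like this:
 *
 *	for_each_online_node(nid) {
 *		init_remap_allocator(nid);
 *		allocate_pgdat(nid);
 *	}
 */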

void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
			 int acpi, int k8)
{
	int nid;
	long kva_target_pfn;

	/*
	 * When mapping a NUMA machine we allocate the node_mem_map arrays
	 * from node local memory.  They are then mapped directly into KVA
	 * between zone normal and vmalloc space.  Calculate the size of
	 * this space and use it to adjust the boundary between ZONE_NORMAL
	 * and ZONE_HIGHMEM.
	 */
	kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE);

	kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE);
	do {
		kva_start_pfn = memblock_find_in_range(kva_target_pfn<<PAGE_SHIFT,
					max_low_pfn<<PAGE_SHIFT,
					kva_pages<<PAGE_SHIFT,
					PTRS_PER_PTE<<PAGE_SHIFT) >> PAGE_SHIFT;
		kva_target_pfn -= PTRS_PER_PTE;
	} while (kva_start_pfn == MEMBLOCK_ERROR && kva_target_pfn > min_low_pfn);

	if (kva_start_pfn == MEMBLOCK_ERROR)
		panic("Can not get kva space\n");

	printk(KERN_INFO "kva_start_pfn ~ %lx max_low_pfn ~ %lx\n",
	       kva_start_pfn, max_low_pfn);
	printk(KERN_INFO "max_pfn = %lx\n", max_pfn);

	/* avoid clash with initrd */
	memblock_x86_reserve_range(kva_start_pfn<<PAGE_SHIFT,
				   (kva_start_pfn + kva_pages)<<PAGE_SHIFT,
				   "KVA PG");
	/* remainder of the old initmem_init() not included in this excerpt */
}

/**
 * init_alloc_remap - Initialize remap allocator for a NUMA node
 * @nid: NUMA node to initialize remap allocator for
 *
 * NUMA nodes may end up without any lowmem.  As allocating pgdat and
 * memmap on a different node with lowmem is inefficient, a special
 * remap allocator is implemented which can be used by alloc_remap().
 *
 * For each node, the amount of memory which will be necessary for
 * pgdat and memmap is calculated and two memory areas of the size are
 * allocated - one in the node and the other in lowmem; then, the area
 * in the node is remapped to the lowmem area.
 *
 * As pgdat and memmap must be allocated in lowmem anyway, this
 * doesn't waste lowmem address space; however, the actual lowmem
 * which gets remapped over is wasted.  The amount shouldn't be
 * problematic on machines where this feature will be used.
 *
 * Initialization failure isn't fatal.  alloc_remap() is used
 * opportunistically and the callers will fall back to other memory
 * allocation mechanisms on failure.
 */
void __init init_alloc_remap(int nid, u64 start, u64 end)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long end_pfn = end >> PAGE_SHIFT;
	unsigned long size, pfn;
	u64 node_pa, remap_pa;
	void *remap_va;

	/*
	 * The acpi/srat node info can show hot-add memory zones where
	 * memory could be added but not currently present.
	 */
	printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n",
	       nid, start_pfn, end_pfn);

	/* calculate the necessary space aligned to large page size */
	size = node_memmap_size_bytes(nid, start_pfn, end_pfn);
	size += ALIGN(sizeof(pg_data_t), PAGE_SIZE);
	size = ALIGN(size, LARGE_PAGE_BYTES);

	/* allocate node memory and the lowmem remap area */
	node_pa = memblock_find_in_range(start, end, size, LARGE_PAGE_BYTES);
	if (node_pa == MEMBLOCK_ERROR) {
		pr_warning("remap_alloc: failed to allocate %lu bytes for node %d\n",
			   size, nid);
		return;
	}
	memblock_x86_reserve_range(node_pa, node_pa + size, "KVA RAM");

	remap_pa = memblock_find_in_range(min_low_pfn << PAGE_SHIFT,
					  max_low_pfn << PAGE_SHIFT,
					  size, LARGE_PAGE_BYTES);
	if (remap_pa == MEMBLOCK_ERROR) {
		pr_warning("remap_alloc: failed to allocate %lu bytes remap area for node %d\n",
			   size, nid);
		memblock_x86_free_range(node_pa, node_pa + size);
		return;
	}
	memblock_x86_reserve_range(remap_pa, remap_pa + size, "KVA PG");
	remap_va = phys_to_virt(remap_pa);

	/* perform actual remap */
	for (pfn = 0; pfn < size >> PAGE_SHIFT; pfn += PTRS_PER_PTE)
		set_pmd_pfn((unsigned long)remap_va + (pfn << PAGE_SHIFT),
			    (node_pa >> PAGE_SHIFT) + pfn,
			    PAGE_KERNEL_LARGE);

	/* initialize remap allocator parameters */
	node_remap_start_pfn[nid] = node_pa >> PAGE_SHIFT;
	node_remap_start_vaddr[nid] = remap_va;
	node_remap_end_vaddr[nid] = remap_va + size;
	node_remap_alloc_vaddr[nid] = remap_va;

	printk(KERN_DEBUG "remap_alloc: node %d [%08llx-%08llx) -> [%p-%p)\n",
	       nid, node_pa, node_pa + size, remap_va, remap_va + size);
}
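
/*
 * Illustrative sketch (assumption, not shown in this excerpt): the
 * reworked initmem_init() below is expected to invoke init_alloc_remap()
 * once per online node, clamping each node's range to max_pfn, e.g.:
 *
 *	for_each_online_node(nid) {
 *		u64 start = (u64)node_start_pfn[nid] << PAGE_SHIFT;
 *		u64 end = min((u64)node_end_pfn[nid], (u64)max_pfn)
 *							<< PAGE_SHIFT;
 *
 *		if (start < end)
 *			init_alloc_remap(nid, start, end);
 *	}
 */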

void __init initmem_init(void)
{
	int nid;

#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	if (max_pfn > max_low_pfn)
		highstart_pfn = max_low_pfn;