#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);
#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
        [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);
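/*
 * Illustrative note (not from the original file): a per-cpu variable
 * is reached by adding the owning cpu's offset to its link-time
 * address, conceptually
 *
 *      int n = *(int *)((char *)&per_cpu__cpu_number +
 *                       __per_cpu_offset[cpu]);
 *
 * which is what the per_cpu(cpu_number, cpu) accessor boils down to.
 * At boot every offset is BOOT_PERCPU_OFFSET, so the accessors already
 * work before setup_per_cpu_areas() runs.
 */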
/*
 * On x86_64 symbols referenced from code should be reachable using
 * 32bit relocations.  Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk which
 * is located at the percpu segment base.  On x86_32, anything can
 * address anywhere.  No need to reserve space in the first chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE      PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE      0
#endif
/**
 * pcpu_need_numa - determine percpu allocation needs to consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should consider NUMA or not.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
        pg_data_t *last = NULL;
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
                int node = early_cpu_to_node(cpu);

                if (node_online(node) && NODE_DATA(node) &&
                    last && last != NODE_DATA(node))
                        return true;

                last = NODE_DATA(node);
        }
#endif
        return false;
}
/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
                                        unsigned long align)
{
        const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
        int node = early_cpu_to_node(cpu);
        void *ptr;

        if (!node_online(node) || !NODE_DATA(node)) {
                ptr = __alloc_bootmem_nopanic(size, align, goal);
                pr_info("cpu %d has no node %d or node-local memory\n",
                        cpu, node);
                pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
                         cpu, size, __pa(ptr));
        } else {
                ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
                                                   size, align, goal);
                pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
                         "%016lx\n", cpu, size, node, __pa(ptr));
        }
        return ptr;
#else
        return __alloc_bootmem_nopanic(size, align, goal);
#endif
}
/*
 * Large page remap allocator
 *
 * This allocator uses PMD page as unit.  A PMD page is allocated for
 * each cpu and each is remapped into vmalloc area using PMD mapping.
 * As PMD page is quite large, only part of it is used for the first
 * chunk.  Unused part is returned to the bootmem allocator.
 *
 * So, the PMD pages are mapped twice - once to the physical mapping
 * and to the vmalloc area for the first percpu chunk.  The double
 * mapping does add one more PMD TLB entry pressure but still is much
 * better than only using 4k mappings while still being NUMA friendly.
 */
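/*
 * Worked example (hypothetical numbers, for illustration only): with
 * PMD_SIZE = 2MB and a first chunk needing ~768k per cpu, each cpu
 * still pins one full 2MB PMD page for the vmalloc-side mapping; the
 * trailing 2MB - 768k of each page is handed back to bootmem and is
 * later recognized by pcpu_lpage_remapped() below.
 */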
#ifdef CONFIG_NEED_MULTIPLE_NODES
struct pcpul_ent {
        unsigned int    cpu;
        void            *ptr;
};

static size_t pcpul_size;
static struct pcpul_ent *pcpul_map;
static struct vm_struct pcpul_vm;

static struct page * __init pcpul_get_page(unsigned int cpu, int pageno)
{
        size_t off = (size_t)pageno << PAGE_SHIFT;

        if (off >= pcpul_size)
                return NULL;

        return virt_to_page(pcpul_map[cpu].ptr + off);
}
static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
{
        size_t map_size, dyn_size;
        unsigned int cpu;
        int i, j;
        ssize_t ret;

        if (!chosen) {
                size_t vm_size = VMALLOC_END - VMALLOC_START;
                size_t tot_size = num_possible_cpus() * PMD_SIZE;

                /* on non-NUMA, embedding is better */
                if (!pcpu_need_numa())
                        return -EINVAL;

                /* don't consume more than 20% of vmalloc area */
                if (tot_size > vm_size / 5) {
                        pr_info("PERCPU: too large chunk size %zuMB for "
                                "large page remap\n", tot_size >> 20);
                        return -EINVAL;
                }
        }

        /* need PSE */
        if (!cpu_has_pse) {
                pr_warning("PERCPU: lpage allocator requires PSE\n");
                return -EINVAL;
        }

        /*
         * Currently supports only single page.  Supporting multiple
         * pages won't be too difficult if it ever becomes necessary.
         */
        pcpul_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
                               PERCPU_DYNAMIC_RESERVE);
        if (pcpul_size > PMD_SIZE) {
                pr_warning("PERCPU: static data is larger than large page, "
                           "can't use large page\n");
                return -EINVAL;
        }
        dyn_size = pcpul_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;

        /* allocate pointer array and alloc large pages */
        map_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpul_map[0]));
        pcpul_map = alloc_bootmem(map_size);

        for_each_possible_cpu(cpu) {
                pcpul_map[cpu].cpu = cpu;
                pcpul_map[cpu].ptr = pcpu_alloc_bootmem(cpu, PMD_SIZE,
                                                        PMD_SIZE);
                if (!pcpul_map[cpu].ptr) {
                        pr_warning("PERCPU: failed to allocate large page "
                                   "for cpu%u\n", cpu);
                        goto enomem;
                }

                /*
                 * Only use pcpul_size bytes and give back the rest.
                 *
                 * Ingo: The 2MB up-rounding bootmem is needed to make
                 * sure the partial 2MB page is still fully RAM - it's
                 * not well-specified to have a PAT-incompatible area
                 * (unmapped RAM, device memory, etc.) in that hole.
                 */
                free_bootmem(__pa(pcpul_map[cpu].ptr + pcpul_size),
                             PMD_SIZE - pcpul_size);

                memcpy(pcpul_map[cpu].ptr, __per_cpu_load, static_size);
        }

        /* allocate address and map */
        pcpul_vm.flags = VM_ALLOC;
        pcpul_vm.size = num_possible_cpus() * PMD_SIZE;
        vm_area_register_early(&pcpul_vm, PMD_SIZE);

        for_each_possible_cpu(cpu) {
                pmd_t *pmd, pmd_v;

                pmd = populate_extra_pmd((unsigned long)pcpul_vm.addr +
                                         cpu * PMD_SIZE);
                pmd_v = pfn_pmd(page_to_pfn(virt_to_page(pcpul_map[cpu].ptr)),
                                PAGE_KERNEL_LARGE);
                set_pmd(pmd, pmd_v);
        }

        /* we're ready, commit */
        pr_info("PERCPU: Remapped at %p with large pages, static data "
                "%zu bytes\n", pcpul_vm.addr, static_size);

        ret = pcpu_setup_first_chunk(pcpul_get_page, static_size,
                                     PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
                                     PMD_SIZE, pcpul_vm.addr, NULL);

        /* sort pcpul_map array for pcpu_lpage_remapped() */
        for (i = 0; i < num_possible_cpus() - 1; i++)
                for (j = i + 1; j < num_possible_cpus(); j++)
                        if (pcpul_map[i].ptr > pcpul_map[j].ptr) {
                                struct pcpul_ent tmp = pcpul_map[i];
                                pcpul_map[i] = pcpul_map[j];
                                pcpul_map[j] = tmp;
                        }

        return ret;

enomem:
        for_each_possible_cpu(cpu)
                if (pcpul_map[cpu].ptr)
                        free_bootmem(__pa(pcpul_map[cpu].ptr), pcpul_size);
        free_bootmem(__pa(pcpul_map), map_size);
        return -ENOMEM;
}
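/*
 * Example of the auto-selection check above (hypothetical numbers): on
 * a 32-bit box with a 128MB vmalloc area and 16 possible cpus,
 * tot_size = 16 * 2MB = 32MB exceeds 128MB / 5, so an unchosen lpage
 * attempt returns -EINVAL and setup falls through to embed or 4k.
 */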
/**
 * pcpu_lpage_remapped - determine whether a kaddr is in pcpul recycled area
 * @kaddr: the kernel address in question
 *
 * Determine whether @kaddr falls in the pcpul recycled area.  This is
 * used by pageattr to detect VM aliases and break up the pcpu PMD
 * mapping such that the same physical page is not mapped under
 * different attributes.
 *
 * The recycled area is always at the tail of a partially used PMD
 * page.
 *
 * RETURNS:
 * Address of corresponding remapped pcpu address if match is found;
 * otherwise, NULL.
 */
void *pcpu_lpage_remapped(void *kaddr)
{
        void *pmd_addr = (void *)((unsigned long)kaddr & PMD_MASK);
        unsigned long offset = (unsigned long)kaddr & ~PMD_MASK;
        int left = 0, right = num_possible_cpus() - 1;
        int pos;

        /* pcpul in use at all? */
        if (!pcpul_map)
                return NULL;

        /* okay, perform binary search */
        while (left <= right) {
                pos = (left + right) / 2;

                if (pcpul_map[pos].ptr < pmd_addr)
                        left = pos + 1;
                else if (pcpul_map[pos].ptr > pmd_addr)
                        right = pos - 1;
                else {
                        /* it shouldn't be in the area for the first chunk */
                        WARN_ON(offset < pcpul_size);

                        return pcpul_vm.addr +
                                pcpul_map[pos].cpu * PMD_SIZE + offset;
                }
        }

        return NULL;
}
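/*
 * Usage sketch (illustrative): an alias-aware caller such as the
 * pageattr code can translate first,
 *
 *      void *vaddr = pcpu_lpage_remapped(kaddr);
 *
 * and, if vaddr is non-NULL, apply the same attribute change to the
 * 2MB percpu mapping at vaddr that it is applying to kaddr.
 */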
#else
static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
{
        return -EINVAL;
}
#endif

/*
 * Embedding allocator
 *
 * The first chunk is sized to just contain the static area plus
 * module and dynamic reserves and embedded into linear physical
 * mapping so that it can use PMD mapping without additional TLB
 * pressure.
 */
static ssize_t __init setup_pcpu_embed(size_t static_size, bool chosen)
{
        size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;

        /*
         * If large page isn't supported, there's no benefit in doing
         * this.  Also, embedding allocation doesn't play well with
         * NUMA.
         */
        if (!chosen && (!cpu_has_pse || pcpu_need_numa()))
                return -EINVAL;

        return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
                                      reserve - PERCPU_FIRST_CHUNK_RESERVE, -1);
}
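/*
 * Sizing example (hypothetical macro values): with an 8k
 * PERCPU_MODULE_RESERVE and a 20k PERCPU_DYNAMIC_RESERVE, the first
 * chunk is static_size + 28k; on x86_64 the module reserve doubles as
 * PERCPU_FIRST_CHUNK_RESERVE, leaving 20k as the dynamic portion
 * passed to pcpu_embed_first_chunk().
 */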
/*
 * 4k page allocator
 *
 * This is the basic allocator.  Static percpu area is allocated
 * page-by-page and most of initialization is done by the generic
 * setup function.
 */
static struct page **pcpu4k_pages __initdata;
static int pcpu4k_nr_static_pages __initdata;

static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno)
{
        if (pageno < pcpu4k_nr_static_pages)
                return pcpu4k_pages[cpu * pcpu4k_nr_static_pages + pageno];
        return NULL;
}

static void __init pcpu4k_populate_pte(unsigned long addr)
{
        populate_extra_pte(addr);
}

static ssize_t __init setup_pcpu_4k(size_t static_size)
{
        size_t pages_size;
        unsigned int cpu;
        int i, j;
        ssize_t ret;

        pcpu4k_nr_static_pages = PFN_UP(static_size);

        /* unaligned allocations can't be freed, round up to page size */
        pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * num_possible_cpus()
                               * sizeof(pcpu4k_pages[0]));
        pcpu4k_pages = alloc_bootmem(pages_size);

        /* allocate and copy */
        j = 0;
        for_each_possible_cpu(cpu)
                for (i = 0; i < pcpu4k_nr_static_pages; i++) {
                        void *ptr;

                        ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
                        if (!ptr) {
                                pr_warning("PERCPU: failed to allocate "
                                           "4k page for cpu%u\n", cpu);
                                goto enomem;
                        }

                        memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
                        pcpu4k_pages[j++] = virt_to_page(ptr);
                }

        /* we're ready, commit */
        pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
                pcpu4k_nr_static_pages, static_size);

        ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size,
                                     PERCPU_FIRST_CHUNK_RESERVE, -1,
                                     -1, NULL, pcpu4k_populate_pte);
        goto out_free_ar;

enomem:
        while (--j >= 0)
                free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE);
        ret = -ENOMEM;
out_free_ar:
        free_bootmem(__pa(pcpu4k_pages), pages_size);
        return ret;
}
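/*
 * Example (hypothetical numbers): a 540k static area gives
 * PFN_UP(540k) = 135 pages per cpu; with 8 possible cpus the
 * pcpu4k_pages array holds 8 * 135 page pointers, each backed by its
 * own PAGE_SIZE bootmem allocation made near the owning cpu's node.
 */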
/* for explicit first chunk allocator selection */
static char pcpu_chosen_alloc[16] __initdata;

static int __init percpu_alloc_setup(char *str)
{
        strncpy(pcpu_chosen_alloc, str, sizeof(pcpu_chosen_alloc) - 1);
        return 0;
}
early_param("percpu_alloc", percpu_alloc_setup);
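/*
 * Boot-time usage: append "percpu_alloc=lpage", "percpu_alloc=embed"
 * or "percpu_alloc=4k" to the kernel command line to force one of the
 * first chunk allocators above; anything else is reported as unknown
 * by setup_per_cpu_areas() below, with 4k as the final fallback.
 */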
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
        struct desc_struct gdt;

        pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
                        0x2 | DESCTYPE_S, 0x8);
        gdt.s = 1;
        write_gdt_entry(get_cpu_gdt_table(cpu),
                        GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
#endif
}
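/*
 * Illustrative note: on 32-bit this GDT entry is what lets percpu
 * accessors use a segment override, conceptually
 *
 *      movl %fs:per_cpu__cpu_number, %eax
 *
 * once %fs is loaded with __KERNEL_PERCPU; 64-bit instead points
 * MSR_GS_BASE at the cpu's area, so no GDT setup is needed here.
 */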
void __init setup_per_cpu_areas(void)
{
        size_t static_size = __per_cpu_end - __per_cpu_start;
        unsigned int cpu;
        unsigned long delta;
        size_t pcpu_unit_size;
        ssize_t ret;

        pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
                NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

        /*
         * Allocate percpu area.  If PSE is supported, try to make use
         * of large page mappings.  Please read comments on top of
         * each allocator for details.
         */
        ret = -EINVAL;
        if (strlen(pcpu_chosen_alloc)) {
                if (strcmp(pcpu_chosen_alloc, "4k")) {
                        if (!strcmp(pcpu_chosen_alloc, "lpage"))
                                ret = setup_pcpu_lpage(static_size, true);
                        else if (!strcmp(pcpu_chosen_alloc, "embed"))
                                ret = setup_pcpu_embed(static_size, true);
                        else
                                pr_warning("PERCPU: unknown allocator %s "
                                           "specified\n", pcpu_chosen_alloc);
                        if (ret < 0)
                                pr_warning("PERCPU: %s allocator failed (%zd), "
                                           "falling back to 4k\n",
                                           pcpu_chosen_alloc, ret);
                }
        } else {
                ret = setup_pcpu_lpage(static_size, false);
                if (ret < 0)
                        ret = setup_pcpu_embed(static_size, false);
        }
        if (ret < 0)
                ret = setup_pcpu_4k(static_size);
        if (ret < 0)
                panic("cannot allocate static percpu area (%zu bytes, err=%zd)",
                      static_size, ret);

        pcpu_unit_size = ret;

        /* alrighty, percpu areas up and running */
        delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
        for_each_possible_cpu(cpu) {
                per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
                per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
                per_cpu(cpu_number, cpu) = cpu;
                setup_percpu_segment(cpu);
                setup_stack_canary_segment(cpu);
                /*
                 * Copy data used in early init routines from the
                 * initial arrays to the per cpu data areas.  These
                 * arrays then become expendable and the *_early_ptr's
                 * are zeroed indicating that the static arrays are
                 * gone.
                 */
#ifdef CONFIG_X86_LOCAL_APIC
                per_cpu(x86_cpu_to_apicid, cpu) =
                        early_per_cpu_map(x86_cpu_to_apicid, cpu);
                per_cpu(x86_bios_cpu_apicid, cpu) =
                        early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
                per_cpu(irq_stack_ptr, cpu) =
                        per_cpu(irq_stack_union.irq_stack, cpu) +
                        IRQ_STACK_SIZE - 64;
#ifdef CONFIG_NUMA
                per_cpu(x86_cpu_to_node_map, cpu) =
                        early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
#endif
                /*
                 * Up to this point, the boot CPU has been using .data.init
                 * area.  Reload any changed state for the boot CPU.
                 */
                if (cpu == boot_cpu_id)
                        switch_to_new_gdt(cpu);
        }

        /* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
        early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
        early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
        early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
        /*
         * make sure boot cpu node_number is right, when boot cpu is on the
         * node that doesn't have mem installed
         */
        per_cpu(node_number, boot_cpu_id) = cpu_to_node(boot_cpu_id);
#endif

        /* Setup node to cpumask map */
        setup_node_to_cpumask_map();

        /* Setup cpu initialized, callin, callout masks */
        setup_cpu_local_masks();
}

/*
 * What follows is the older (pre-percpu-rework) version of this
 * file's setup code, retained here for comparison: each cpu got a
 * uniform PERCPU_ENOUGH_ROOM-sized bootmem copy and the 64-bit PDA
 * was managed explicitly.
 */

#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
EXPORT_SYMBOL(boot_cpu_physical_apicid);
unsigned int max_physical_apicid;

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif

/* map cpu index to physical APIC ID */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#define X86_64_NUMA     1       /* (used later) */

/* map cpu index to node index */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/* which logical CPUs are on which nodes */
cpumask_t *node_to_cpumask_map;
EXPORT_SYMBOL(node_to_cpumask_map);

/* setup node_to_cpumask_map */
static void __init setup_node_to_cpumask_map(void);
#else
static inline void setup_node_to_cpumask_map(void) { }
#endif

#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas.  These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                per_cpu(x86_cpu_to_apicid, cpu) =
                        early_per_cpu_map(x86_cpu_to_apicid, cpu);
                per_cpu(x86_bios_cpu_apicid, cpu) =
                        early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#ifdef X86_64_NUMA
                per_cpu(x86_cpu_to_node_map, cpu) =
                        early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
        }

        /* indicate the early static arrays will soon be gone */
        early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
        early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#ifdef X86_64_NUMA
        early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
}

#ifdef CONFIG_X86_32
/*
 * Great future not-so-futuristic plan: make i386 and x86_64 do it
 * the same way
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
static inline void setup_cpu_pda_map(void) { }

#elif !defined(CONFIG_SMP)
static inline void setup_cpu_pda_map(void) { }

#else /* CONFIG_SMP && CONFIG_X86_64 */

/*
 * Allocate cpu_pda pointer table and array via alloc_bootmem.
 */
static void __init setup_cpu_pda_map(void)
{
        char *pda;
        struct x8664_pda **new_cpu_pda;
        unsigned long size;
        int cpu;

        size = roundup(sizeof(struct x8664_pda), cache_line_size());

        /* allocate cpu_pda array and pointer table */
        {
                unsigned long tsize = nr_cpu_ids * sizeof(void *);
                unsigned long asize = size * (nr_cpu_ids - 1);

                tsize = roundup(tsize, cache_line_size());
                new_cpu_pda = alloc_bootmem(tsize + asize);
                pda = (char *)new_cpu_pda + tsize;
        }

        /* initialize pointer table to static pda's */
        for_each_possible_cpu(cpu) {
                if (cpu == 0) {
                        /* leave boot cpu pda in place */
                        new_cpu_pda[0] = cpu_pda(0);
                        continue;
                }
                new_cpu_pda[cpu] = (struct x8664_pda *)pda;
                new_cpu_pda[cpu]->in_bootmem = 1;
                pda += size;
        }

        /* point to new pointer table */
        _cpu_pda = new_cpu_pda;
}

#endif /* CONFIG_SMP && CONFIG_X86_64 */

#ifdef CONFIG_X86_64

/* correctly size the local cpu masks */
static void __init setup_cpu_local_masks(void)
{
        alloc_bootmem_cpumask_var(&cpu_initialized_mask);
        alloc_bootmem_cpumask_var(&cpu_callin_mask);
        alloc_bootmem_cpumask_var(&cpu_callout_mask);
        alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

#else /* CONFIG_X86_32 */

static inline void setup_cpu_local_masks(void)
{
}

#endif /* CONFIG_X86_32 */

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
        ssize_t size, old_size;
        char *ptr;
        int cpu;
        unsigned long align = 1;

        /* Setup cpu_pda map */
        setup_cpu_pda_map();

        /* Copy section for each CPU (we discard the original) */
        old_size = PERCPU_ENOUGH_ROOM;
        align = max_t(unsigned long, PAGE_SIZE, align);
        size = roundup(old_size, align);

        pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
                NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

        pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);

        for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
                ptr = __alloc_bootmem(size, align,
                                      __pa(MAX_DMA_ADDRESS));
#else
                int node = early_cpu_to_node(cpu);
                if (!node_online(node) || !NODE_DATA(node)) {
                        ptr = __alloc_bootmem(size, align,
                                              __pa(MAX_DMA_ADDRESS));
                        pr_info("cpu %d has no node %d or node-local memory\n",
                                cpu, node);
                        pr_debug("per cpu data for cpu%d at %016lx\n",
                                 cpu, __pa(ptr));
                } else {
                        ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
                                                   __pa(MAX_DMA_ADDRESS));
                        pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
                                 cpu, node, __pa(ptr));
                }
#endif
                per_cpu_offset(cpu) = ptr - __per_cpu_start;
                memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
        }

        /* Setup percpu data maps */
        setup_per_cpu_maps();

        /* Setup node to cpumask map */
        setup_node_to_cpumask_map();

        /* Setup cpu initialized, callin, callout masks */
        setup_cpu_local_masks();
}

#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
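/*
 * Example (illustrative): under this older scheme every cpu gets an
 * identical PERCPU_ENOUGH_ROOM-sized copy, so e.g. a hypothetical 1MB
 * PERCPU_ENOUGH_ROOM and 4 possible cpus cost ~4MB of bootmem up
 * front, with no dynamic percpu allocator behind it.
 */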
#ifdef X86_64_NUMA

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
        unsigned int node, num = 0;
        cpumask_t *map;

        /* setup nr_node_ids if not done yet */
        if (nr_node_ids == MAX_NUMNODES) {
                for_each_node_mask(node, node_possible_map)
                        num = node;
                nr_node_ids = num + 1;
        }

        /* allocate the map */
        map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));

        pr_debug("Node to cpumask map at %p for %d nodes\n",
                 map, nr_node_ids);

        /* node_to_cpumask() will now work */
        node_to_cpumask_map = map;
}
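/*
 * Size example (illustrative): with NR_CPUS = 64 a cpumask_t is 8
 * bytes, so nr_node_ids = 4 makes the map above a single 32-byte
 * bootmem allocation; its entries are populated later as cpus are
 * brought up via numa_add_cpu().
 */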
void __cpuinit numa_set_node(int cpu, int node)
{
        int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

        if (cpu_pda(cpu) && node != NUMA_NO_NODE)
                cpu_pda(cpu)->nodenumber = node;

        if (cpu_to_node_map)
                cpu_to_node_map[cpu] = node;

        else if (per_cpu_offset(cpu))
                per_cpu(x86_cpu_to_node_map, cpu) = node;

        else
                pr_debug("Setting node for non-present cpu %d\n", cpu);
}

void __cpuinit numa_clear_node(int cpu)
{
        numa_set_node(cpu, NUMA_NO_NODE);
}
#ifndef CONFIG_DEBUG_PER_CPU_MAPS

void __cpuinit numa_add_cpu(int cpu)
{
        cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
        cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]);
}

#else /* CONFIG_DEBUG_PER_CPU_MAPS */
/*
 * --------- debug versions of the numa functions ---------
 */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
        int node = cpu_to_node(cpu);
        cpumask_t *mask;
        char buf[64];

        if (node_to_cpumask_map == NULL) {
                printk(KERN_ERR "node_to_cpumask_map NULL\n");
                dump_stack();
                return;
        }

        mask = &node_to_cpumask_map[node];
        if (enable)
                cpu_set(cpu, *mask);
        else
                cpu_clear(cpu, *mask);

        cpulist_scnprintf(buf, sizeof(buf), mask);
        printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
               enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
}

void __cpuinit numa_add_cpu(int cpu)
{
        numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
        numa_set_cpumask(cpu, 0);
}
int cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
                printk(KERN_WARNING
                       "cpu_to_node(%d): usage too early!\n", cpu);
                dump_stack();
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(cpu_to_node);
/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map))
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

        if (!per_cpu_offset(cpu)) {
                printk(KERN_WARNING
                       "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
                dump_stack();
                return NUMA_NO_NODE;
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}
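/*
 * Usage note (illustrative): code running before the per cpu areas
 * exist must use early_cpu_to_node(); the bootmem allocation paths in
 * setup_per_cpu_areas() above do exactly that when choosing a node
 * for each cpu's initial chunk.
 */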
/* empty cpumask */
static const cpumask_t cpu_mask_none;

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const cpumask_t *cpumask_of_node(int node)
{
        if (node_to_cpumask_map == NULL) {
                printk(KERN_WARNING
                       "cpumask_of_node(%d): no node_to_cpumask_map!\n",
                       node);
                dump_stack();
                return (const cpumask_t *)&cpu_online_map;
        }
        if (node >= nr_node_ids) {
                printk(KERN_WARNING
                       "cpumask_of_node(%d): node > nr_node_ids(%d)\n",
                       node, nr_node_ids);
                dump_stack();
                return &cpu_mask_none;
        }
        return &node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);
/*
 * Returns a bitmask of CPUs on Node 'node'.
 *
 * Side note: this function creates the returned cpumask on the stack
 * so with a high NR_CPUS count, excessive stack space is used.  The
 * node_to_cpumask_ptr function should be used whenever possible.
 */
cpumask_t node_to_cpumask(int node)
{
        if (node_to_cpumask_map == NULL) {
                printk(KERN_WARNING
                       "node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
                dump_stack();
                return cpu_online_map;
        }
        if (node >= nr_node_ids) {
                printk(KERN_WARNING
                       "node_to_cpumask(%d): node > nr_node_ids(%d)\n",
                       node, nr_node_ids);
                dump_stack();
                return cpu_mask_none;
        }
        return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(node_to_cpumask);

/*
 * --------- end of debug versions of the numa functions ---------
 */

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */

#endif /* X86_64_NUMA */