--- arch/x86/kernel/smpboot.c (old)
+++ arch/x86/kernel/smpboot.c (new)
@@ old 130 / new 127 @@
 DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
 EXPORT_PER_CPU_SYMBOL(cpu_core_map);

+DEFINE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
+
 /* Per CPU bogomips and other parameters */
 DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 EXPORT_PER_CPU_SYMBOL(cpu_info);

 atomic_t init_deasserted;
-#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)
-/* which node each logical CPU is on */
-int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
-EXPORT_SYMBOL(cpu_to_node_map);
-
-/* set up a mapping between cpu and node. */
-static void map_cpu_to_node(int cpu, int node)
-{
-	printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
-	cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
-	cpu_to_node_map[cpu] = node;
-}
-
-/* undo a mapping between cpu and node. */
-static void unmap_cpu_to_node(int cpu)
-{
-	int node;
-
-	printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
-	for (node = 0; node < MAX_NUMNODES; node++)
-		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
-	cpu_to_node_map[cpu] = 0;
-}
-#else /* !(CONFIG_NUMA && CONFIG_X86_32) */
-#define map_cpu_to_node(cpu, node)	({})
-#define unmap_cpu_to_node(cpu)	({})
-#endif
-
-#ifdef CONFIG_X86_32
-static int boot_cpu_logical_apicid;
-
-u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly =
-					{ [0 ... NR_CPUS-1] = BAD_APICID };
-
-static void map_cpu_to_logical_apicid(void)
-{
-	int cpu = smp_processor_id();
-	int apicid = logical_smp_processor_id();
-	int node = apic->apicid_to_node(apicid);
-
-	if (!node_online(node))
-		node = first_online_node;
-
-	cpu_2_logical_apicid[cpu] = apicid;
-	map_cpu_to_node(cpu, node);
-}
-
-void numa_remove_cpu(int cpu)
-{
-	cpu_2_logical_apicid[cpu] = BAD_APICID;
-	unmap_cpu_to_node(cpu);
-}
-#else
-#define map_cpu_to_logical_apicid()  do {} while (0)
-#endif

  * Report back to the Boot Processor.
@@ old 343 / new 285 @@
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 	x86_platform.nmi_init();

+	/*
+	 * Wait until the cpu which brought this one up marked it
+	 * online before enabling interrupts. If we don't do that then
+	 * we can end up waking up the softirq thread before this cpu
+	 * reached the active state, which makes the scheduler unhappy
+	 * and schedule the softirq thread on the wrong cpu. This is
+	 * only observable with forced threaded interrupts, but in
+	 * theory it could also happen w/o them. It's just way harder
+	 */
+	while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask))
+		cpu_relax();
+
 	/* enable local interrupts */
 	local_irq_enable();
@@ old 358 @@
-#ifdef CONFIG_CPUMASK_OFFSTACK
-/* In this case, llc_shared_map is a pointer to a cpumask. */
-static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
-				    const struct cpuinfo_x86 *src)
-{
-	struct cpumask *llc = dst->llc_shared_map;
-	*dst = *src;
-	dst->llc_shared_map = llc;
-}
-#else
-static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
-				    const struct cpuinfo_x86 *src)
-{
-	*dst = *src;
-}
-#endif /* CONFIG_CPUMASK_OFFSTACK */

  * The bootstrap kernel entry code has set these up. Save them for
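The removed helper only existed because, with CONFIG_CPUMASK_OFFSTACK, llc_shared_map inside struct cpuinfo_x86 was a pointer that a plain structure copy would clobber. With the mask moved out into the per-CPU cpu_llc_shared_map, a plain assignment suffices. A minimal illustrative sketch, not quoted from this diff (the function name is hypothetical):

/* Illustrative sketch only: copying boot_cpu_data no longer has to
 * preserve an embedded cpumask pointer, so a struct assignment is enough. */
static void smp_store_cpu_info_sketch(int id)
{
	struct cpuinfo_x86 *c = &cpu_data(id);

	*c = boot_cpu_data;	/* nothing inside *c needs to be saved first */
	c->cpu_index = id;
}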
@@ old 390 / new 328 @@
 static void __cpuinit link_thread_siblings(int cpu1, int cpu2)
 {
-	struct cpuinfo_x86 *c1 = &cpu_data(cpu1);
-	struct cpuinfo_x86 *c2 = &cpu_data(cpu2);
-
 	cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2));
 	cpumask_set_cpu(cpu2, cpu_sibling_mask(cpu1));
 	cpumask_set_cpu(cpu1, cpu_core_mask(cpu2));
 	cpumask_set_cpu(cpu2, cpu_core_mask(cpu1));
-	cpumask_set_cpu(cpu1, c2->llc_shared_map);
-	cpumask_set_cpu(cpu2, c1->llc_shared_map);
+	cpumask_set_cpu(cpu1, cpu_llc_shared_mask(cpu2));
+	cpumask_set_cpu(cpu2, cpu_llc_shared_mask(cpu1));
 }
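The cpu_sibling_mask(), cpu_core_mask() and cpu_llc_shared_mask() accessors used above are not part of this excerpt. A minimal sketch of what they presumably look like, assuming thin per_cpu() wrappers (typically in asm/smp.h) around the per-CPU masks declared earlier:

/* Sketch only: assumed accessors for the per-CPU topology masks. */
static inline struct cpumask *cpu_sibling_mask(int cpu)
{
	return per_cpu(cpu_sibling_map, cpu);
}

static inline struct cpumask *cpu_core_mask(int cpu)
{
	return per_cpu(cpu_core_map, cpu);
}

static inline struct cpumask *cpu_llc_shared_mask(int cpu)
{
	return per_cpu(cpu_llc_shared_map, cpu);
}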
@@ old 415 / new 350 @@
 			if (cpu_has(c, X86_FEATURE_TOPOEXT)) {
 				if (c->phys_proc_id == o->phys_proc_id &&
+				    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i) &&
 				    c->compute_unit_id == o->compute_unit_id)
 					link_thread_siblings(cpu, i);
 			} else if (c->phys_proc_id == o->phys_proc_id &&
@@ old 425 / new 361 @@
 		cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
 	}

-	cpumask_set_cpu(cpu, c->llc_shared_map);
+	cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));

 	if (__this_cpu_read(cpu_info.x86_max_cores) == 1) {
 		cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
@@ old 436 / new 372 @@
 	for_each_cpu(i, cpu_sibling_setup_mask) {
 		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
 		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
-			cpumask_set_cpu(i, c->llc_shared_map);
-			cpumask_set_cpu(cpu, cpu_data(i).llc_shared_map);
+			cpumask_set_cpu(i, cpu_llc_shared_mask(cpu));
+			cpumask_set_cpu(cpu, cpu_llc_shared_mask(i));
 		}
 		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
 			cpumask_set_cpu(i, cpu_core_mask(cpu));
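For illustration only, once set_cpu_sibling_map() has filled these masks, the LLC-sharing set of a CPU can be walked with the same accessor the new code uses. The helper below is hypothetical and not part of the diff:

/* Hypothetical consumer of the masks built above (illustration only). */
static void print_llc_partners(int cpu)
{
	int i;

	for_each_cpu(i, cpu_llc_shared_mask(cpu))
		pr_info("CPU%d shares a last-level cache with CPU%d\n",
			cpu, i);
}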
@@ old 788 / new 724 @@
 	stack_start = c_idle.idle->thread.sp;

 	/* start_ip had better be page-aligned! */
-	start_ip = setup_trampoline();
+	start_ip = trampoline_address();

 	/* So we see what's up */
 	announce_cpu(cpu, apicid);
@@ old 851 / new 789 @@
 		pr_debug("CPU%d: has booted.\n", cpu);
@@ old 854 / new 792 @@
-		if (*((volatile unsigned char *)trampoline_base)
-				== 0xA5)
+		if (*(volatile u32 *)TRAMPOLINE_SYM(trampoline_status)
+				== 0xA5A5A5A5)
 			/* trampoline started but...? */
 			pr_err("CPU%d: Stuck ??\n", cpu);
@@ old 960 / new 906 @@
 		physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
 	else
 		physid_set_mask_of_physid(0, &phys_cpu_present_map);
-	map_cpu_to_logical_apicid();
 	cpumask_set_cpu(0, cpu_sibling_mask(0));
 	cpumask_set_cpu(0, cpu_core_mask(0));
@@ old 1090 / new 1035 @@
 	preempt_disable();
 	smp_cpu_index_default();
-	memcpy(__this_cpu_ptr(&cpu_info), &boot_cpu_data, sizeof(cpu_info));
-	cpumask_copy(cpu_callin_mask, cpumask_of(0));

 	/*
 	 * Setup boot CPU information
 	 */
 	smp_store_cpu_info(0); /* Final full version of the data */
-#ifdef CONFIG_X86_32
-	boot_cpu_logical_apicid = logical_smp_processor_id();
-#endif
+	cpumask_copy(cpu_callin_mask, cpumask_of(0));

 	current_thread_info()->cpu = 0;  /* needed? */
 	for_each_possible_cpu(i) {
 		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
 		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
-		zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
+		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
 	}
 	set_cpu_sibling_map(0);
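The zalloc_cpumask_var() calls above matter mainly when CONFIG_CPUMASK_OFFSTACK is set, where cpumask_var_t is a real allocation rather than an embedded bitmap. A small, self-contained usage sketch of that API (the function below is illustrative only, not part of the diff):

/* Illustrative only: allocate-and-zero, use, then free a cpumask_var_t. */
static bool cpumask_var_example(void)
{
	cpumask_var_t tmp;

	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))	/* zeroed mask on success */
		return false;

	cpumask_set_cpu(0, tmp);
	pr_info("%u bit(s) set\n", cpumask_weight(tmp));

	free_cpumask_var(tmp);
	return true;
}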
@@ old 1404 / new 1345 @@
 	void *mwait_ptr;
 	struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info);

-	if (!(cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)))
+	if (!(this_cpu_has(X86_FEATURE_MWAIT) && mwait_usable(c)))
 		return;
-	if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLSH))
+	if (!this_cpu_has(X86_FEATURE_CLFLSH))
 		return;
 	if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
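These feature checks gate the CPUID_MWAIT_LEAF probe that follows in mwait_play_dead(). A rough sketch of the kind of MONITOR/MWAIT parking loop such code ends in; the function, its parameters and the ordering are assumptions for illustration, not quoted from this diff:

/* Rough sketch (assumed shape, not the kernel's exact code): arm the
 * monitor on a cache line, then sleep in the requested C-state forever. */
static void mwait_park_sketch(void *monitor_line, unsigned int eax_hint)
{
	for (;;) {
		clflush(monitor_line);		/* flush the monitored line from the cache */
		mb();
		__monitor(monitor_line, 0, 0);	/* arm address monitoring */
		mb();
		__mwait(eax_hint, 0);		/* wait; wakes on a write or interrupt */
	}
}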