~ubuntu-branches/ubuntu/precise/linux-ti-omap4/precise

Viewing changes to arch/x86/kernel/smpboot.c

  • Committer: Bazaar Package Importer
  • Author(s): Paolo Pisati
  • Date: 2011-06-29 15:23:51 UTC
  • mfrom: (26.1.1 natty-proposed)
  • Revision ID: james.westby@ubuntu.com-20110629152351-xs96tm303d95rpbk
Tags: 3.0.0-1200.2
* Rebased against 3.0.0-6.7
* BSP from TI based on 3.0.0

--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -64,6 +64,7 @@
 #include <asm/mtrr.h>
 #include <asm/mwait.h>
 #include <asm/apic.h>
+#include <asm/io_apic.h>
 #include <asm/setup.h>
 #include <asm/uv/uv.h>
 #include <linux/mc146818rtc.h>
@@ -71,10 +72,6 @@
 #include <asm/smpboot_hooks.h>
 #include <asm/i8259.h>
 
-#ifdef CONFIG_X86_32
-u8 apicid_2_node[MAX_APICID];
-#endif
-
 /* State of each CPU */
 DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
@@ -130,68 +127,14 @@
 DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
 EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 
+DEFINE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
+
 /* Per CPU bogomips and other parameters */
 DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 EXPORT_PER_CPU_SYMBOL(cpu_info);
 
 atomic_t init_deasserted;
 
-#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)
-/* which node each logical CPU is on */
-int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
-EXPORT_SYMBOL(cpu_to_node_map);
-
-/* set up a mapping between cpu and node. */
-static void map_cpu_to_node(int cpu, int node)
-{
-        printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
-        cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
-        cpu_to_node_map[cpu] = node;
-}
-
-/* undo a mapping between cpu and node. */
-static void unmap_cpu_to_node(int cpu)
-{
-        int node;
-
-        printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
-        for (node = 0; node < MAX_NUMNODES; node++)
-                cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
-        cpu_to_node_map[cpu] = 0;
-}
-#else /* !(CONFIG_NUMA && CONFIG_X86_32) */
-#define map_cpu_to_node(cpu, node)      ({})
-#define unmap_cpu_to_node(cpu)  ({})
-#endif
-
-#ifdef CONFIG_X86_32
-static int boot_cpu_logical_apicid;
-
-u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly =
-                                        { [0 ... NR_CPUS-1] = BAD_APICID };
-
-static void map_cpu_to_logical_apicid(void)
-{
-        int cpu = smp_processor_id();
-        int apicid = logical_smp_processor_id();
-        int node = apic->apicid_to_node(apicid);
-
-        if (!node_online(node))
-                node = first_online_node;
-
-        cpu_2_logical_apicid[cpu] = apicid;
-        map_cpu_to_node(cpu, node);
-}
-
-void numa_remove_cpu(int cpu)
-{
-        cpu_2_logical_apicid[cpu] = BAD_APICID;
-        unmap_cpu_to_node(cpu);
-}
-#else
-#define map_cpu_to_logical_apicid()  do {} while (0)
-#endif
-
 /*
  * Report back to the Boot Processor.
  * Running on AP.
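
The DEFINE_PER_CPU(cpumask_var_t, cpu_llc_shared_map) added above follows the usual cpumask_var_t pattern: with CONFIG_CPUMASK_OFFSTACK=y the variable is only a pointer and has to be allocated before use (a later hunk adds the matching zalloc_cpumask_var() call in native_smp_prepare_cpus()). A minimal illustration of that pattern, not code from this file (the function name is made up):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/smp.h>

/* Hypothetical example of the cpumask_var_t allocate/use/free cycle. */
static int example_llc_mask_usage(void)
{
        cpumask_var_t mask;     /* array or pointer, depending on config */

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        cpumask_set_cpu(smp_processor_id(), mask);
        /* ... use the mask ... */

        free_cpumask_var(mask);
        return 0;
}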
@@ -259,7 +202,6 @@
                 apic->smp_callin_clear_local_apic();
         setup_local_APIC();
         end_local_APIC_setup();
-        map_cpu_to_logical_apicid();
 
         /*
          * Need to setup vector mappings before we enable interrupts.
@@ -343,6 +285,19 @@
         per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
         x86_platform.nmi_init();
 
+        /*
+         * Wait until the cpu which brought this one up marked it
+         * online before enabling interrupts. If we don't do that then
+         * we can end up waking up the softirq thread before this cpu
+         * reached the active state, which makes the scheduler unhappy
+         * and schedule the softirq thread on the wrong cpu. This is
+         * only observable with forced threaded interrupts, but in
+         * theory it could also happen w/o them. It's just way harder
+         * to achieve.
+         */
+        while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask))
+                cpu_relax();
+
         /* enable local interrupts */
         local_irq_enable();
 
@@ -355,23 +310,6 @@
         cpu_idle();
 }
 
-#ifdef CONFIG_CPUMASK_OFFSTACK
-/* In this case, llc_shared_map is a pointer to a cpumask. */
-static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
-                                    const struct cpuinfo_x86 *src)
-{
-        struct cpumask *llc = dst->llc_shared_map;
-        *dst = *src;
-        dst->llc_shared_map = llc;
-}
-#else
-static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
-                                    const struct cpuinfo_x86 *src)
-{
-        *dst = *src;
-}
-#endif /* CONFIG_CPUMASK_OFFSTACK */
-
 /*
  * The bootstrap kernel entry code has set these up. Save them for
  * a given CPU
@@ -381,7 +319,7 @@
 {
         struct cpuinfo_x86 *c = &cpu_data(id);
 
-        copy_cpuinfo_x86(c, &boot_cpu_data);
+        *c = boot_cpu_data;
         c->cpu_index = id;
         if (id != 0)
                 identify_secondary_cpu(c);
@@ -389,15 +327,12 @@
 
 static void __cpuinit link_thread_siblings(int cpu1, int cpu2)
 {
-        struct cpuinfo_x86 *c1 = &cpu_data(cpu1);
-        struct cpuinfo_x86 *c2 = &cpu_data(cpu2);
-
         cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2));
         cpumask_set_cpu(cpu2, cpu_sibling_mask(cpu1));
         cpumask_set_cpu(cpu1, cpu_core_mask(cpu2));
         cpumask_set_cpu(cpu2, cpu_core_mask(cpu1));
-        cpumask_set_cpu(cpu1, c2->llc_shared_map);
-        cpumask_set_cpu(cpu2, c1->llc_shared_map);
+        cpumask_set_cpu(cpu1, cpu_llc_shared_mask(cpu2));
+        cpumask_set_cpu(cpu2, cpu_llc_shared_mask(cpu1));
 }
 
 
@@ -414,6 +349,7 @@
 
                         if (cpu_has(c, X86_FEATURE_TOPOEXT)) {
                                 if (c->phys_proc_id == o->phys_proc_id &&
+                                    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i) &&
                                     c->compute_unit_id == o->compute_unit_id)
                                         link_thread_siblings(cpu, i);
                         } else if (c->phys_proc_id == o->phys_proc_id &&
@@ -425,7 +361,7 @@
                 cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
         }
 
-        cpumask_set_cpu(cpu, c->llc_shared_map);
+        cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
 
         if (__this_cpu_read(cpu_info.x86_max_cores) == 1) {
                 cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
@@ -436,8 +372,8 @@
         for_each_cpu(i, cpu_sibling_setup_mask) {
                 if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
                     per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
-                        cpumask_set_cpu(i, c->llc_shared_map);
-                        cpumask_set_cpu(cpu, cpu_data(i).llc_shared_map);
+                        cpumask_set_cpu(i, cpu_llc_shared_mask(cpu));
+                        cpumask_set_cpu(cpu, cpu_llc_shared_mask(i));
                 }
                 if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
                         cpumask_set_cpu(i, cpu_core_mask(cpu));
@@ -476,7 +412,7 @@
             !(cpu_has(c, X86_FEATURE_AMD_DCM)))
                 return cpu_core_mask(cpu);
         else
-                return c->llc_shared_map;
+                return cpu_llc_shared_mask(cpu);
 }
 
 static void impress_friends(void)
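
The hunks above replace direct use of c->llc_shared_map with cpu_llc_shared_mask(cpu), matching the per-CPU cpu_llc_shared_map declared earlier in this diff. The accessor is presumably a thin per-cpu lookup along these lines (a sketch; the real definition should live in <asm/smp.h>):

/* Assumed accessor: resolve the LLC-sharing mask for @cpu. */
static inline struct cpumask *cpu_llc_shared_mask(int cpu)
{
        return per_cpu(cpu_llc_shared_map, cpu);
}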
@@ -788,7 +724,7 @@
         stack_start  = c_idle.idle->thread.sp;
 
         /* start_ip had better be page-aligned! */
-        start_ip = setup_trampoline();
+        start_ip = trampoline_address();
 
         /* So we see what's up */
         announce_cpu(cpu, apicid);
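
start_ip now comes from trampoline_address() rather than setup_trampoline(), and the status word in the later hunks is read through TRAMPOLINE_SYM(): the trampoline blob is relocated to low memory once at boot, so symbols inside it must be translated to their relocated addresses. A sketch of what those <asm/trampoline.h> helpers presumably look like (the names x86_trampoline_base, x86_trampoline_start and trampoline_data are assumptions, not taken from this change):

extern unsigned char *x86_trampoline_base;
extern unsigned char x86_trampoline_start[];
extern unsigned char trampoline_data[];

/* Translate a symbol inside the trampoline blob to its relocated address. */
#define TRAMPOLINE_SYM(x)                                              \
        ((void *)(x86_trampoline_base +                                \
                  ((const unsigned long)(x) -                          \
                   (const unsigned long)x86_trampoline_start)))

/* Physical address at which the AP starts executing. */
static inline unsigned long trampoline_address(void)
{
        return virt_to_phys(TRAMPOLINE_SYM(trampoline_data));
}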
@@ -798,6 +734,8 @@
          * the targeted processor.
          */
 
+        printk(KERN_DEBUG "smpboot cpu %d: start_ip = %lx\n", cpu, start_ip);
+
         atomic_set(&init_deasserted, 0);
 
         if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
@@ -851,8 +789,8 @@
                         pr_debug("CPU%d: has booted.\n", cpu);
                 else {
                         boot_error = 1;
-                        if (*((volatile unsigned char *)trampoline_base)
-                                        == 0xA5)
+                        if (*(volatile u32 *)TRAMPOLINE_SYM(trampoline_status)
+                            == 0xA5A5A5A5)
                                 /* trampoline started but...? */
                                 pr_err("CPU%d: Stuck ??\n", cpu);
                         else
@@ -878,7 +816,7 @@
         }
 
         /* mark "stuck" area as not stuck */
-        *((volatile unsigned long *)trampoline_base) = 0;
+        *(volatile u32 *)TRAMPOLINE_SYM(trampoline_status) = 0;
 
         if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
                 /*
@@ -945,6 +883,14 @@
         return 0;
 }
 
+/**
+ * arch_disable_smp_support() - disables SMP support for x86 at runtime
+ */
+void arch_disable_smp_support(void)
+{
+        disable_ioapic_support();
+}
+
 /*
  * Fall back to non SMP mode after errors.
  *
@@ -960,7 +906,6 @@
                 physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
         else
                 physid_set_mask_of_physid(0, &phys_cpu_present_map);
-        map_cpu_to_logical_apicid();
         cpumask_set_cpu(0, cpu_sibling_mask(0));
         cpumask_set_cpu(0, cpu_core_mask(0));
 }
@@ -1045,7 +990,7 @@
                                 "(tell your hw vendor)\n");
                 }
                 smpboot_clear_io_apic();
-                arch_disable_smp_support();
+                disable_ioapic_support();
                 return -1;
         }
 
@@ -1089,21 +1034,19 @@
 
         preempt_disable();
         smp_cpu_index_default();
-        memcpy(__this_cpu_ptr(&cpu_info), &boot_cpu_data, sizeof(cpu_info));
-        cpumask_copy(cpu_callin_mask, cpumask_of(0));
-        mb();
+
         /*
          * Setup boot CPU information
          */
         smp_store_cpu_info(0); /* Final full version of the data */
-#ifdef CONFIG_X86_32
-        boot_cpu_logical_apicid = logical_smp_processor_id();
-#endif
+        cpumask_copy(cpu_callin_mask, cpumask_of(0));
+        mb();
+
         current_thread_info()->cpu = 0;  /* needed? */
         for_each_possible_cpu(i) {
                 zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
                 zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
-                zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
+                zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
         }
         set_cpu_sibling_map(0);
 
@@ -1139,8 +1082,6 @@
 
         bsp_end_local_APIC_setup();
 
-        map_cpu_to_logical_apicid();
-
         if (apic->setup_portio_remap)
                 apic->setup_portio_remap();
 
@@ -1379,7 +1320,7 @@
 {
         idle_task_exit();
         reset_lazy_tlbstate();
-        c1e_remove_cpu(raw_smp_processor_id());
+        amd_e400_remove_cpu(raw_smp_processor_id());
 
         mb();
         /* Ack it */
@@ -1404,9 +1345,9 @@
         void *mwait_ptr;
         struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info);
 
-        if (!(cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)))
+        if (!(this_cpu_has(X86_FEATURE_MWAIT) && mwait_usable(c)))
                 return;
-        if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLSH))
+        if (!this_cpu_has(X86_FEATURE_CLFLSH))
                 return;
         if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
                 return;
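
The cpu_has(c, ...) tests against the current CPU's cpu_info are replaced by this_cpu_has(...), which checks the capability bit through a per-cpu accessor without first taking a pointer to cpu_info. A simplified sketch of the idea (the real macro in <asm/cpufeature.h> also special-cases compile-time-constant bits):

/* Simplified: test a feature bit of the running CPU's cpu_info. */
#define this_cpu_has(bit)                                       \
        x86_this_cpu_test_bit(bit,                              \
                (unsigned long *)&cpu_info.x86_capability)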