/*
 * Xen SMP support
 *
 * This file implements the Xen versions of smp_ops.  SMP under Xen is
 * very straightforward.  Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of.  As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/smp.h>

#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "mmu.h"
cpumask_var_t xen_cpu_initialized_map;

static DEFINE_PER_CPU(int, xen_resched_irq);
static DEFINE_PER_CPU(int, xen_callfunc_irq);
static DEFINE_PER_CPU(int, xen_callfuncsingle_irq);
static DEFINE_PER_CPU(int, xen_debug_irq) = -1;

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
/*
 * Reschedule call back.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
	inc_irq_stat(irq_resched_count);

	return IRQ_HANDLED;
}
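/*
 * First C code run by a freshly started vcpu (via cpu_bringup_and_idle
 * below): finish per-cpu setup, mark ourselves online and enable
 * interrupts.
 */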
static void __cpuinit cpu_bringup(void)
{
	int cpu = smp_processor_id();

	cpu_init();
	touch_softlockup_watchdog();
	preempt_disable();

	xen_enable_sysenter();
	xen_enable_syscall();

	cpu = smp_processor_id();
	smp_store_cpu_info(cpu);
	cpu_data(cpu).x86_max_cores = 1;
	set_cpu_sibling_map(cpu);

	xen_setup_cpu_clockevents();

	set_cpu_online(cpu, true);
	percpu_write(cpu_state, CPU_ONLINE);
	wmb();

	/* We can take interrupts now: we're officially "up". */
	local_irq_enable();

	wmb();			/* make sure everything is out */
}
static void __cpuinit cpu_bringup_and_idle(void)
{
	cpu_bringup();
	cpu_idle();
}
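/*
 * There are no hardware IPIs under Xen; each "vector" is instead bound
 * to a per-cpu event channel with its own irq handler.  On any binding
 * failure, whatever was already bound for this cpu is torn down again.
 */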
static int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	const char *resched_name, *callfunc_name, *debug_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_resched_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfunc_irq, cpu) = rc;

	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
				     IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
				     debug_name, NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_debug_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
				    cpu,
				    xen_call_function_single_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfuncsingle_irq, cpu) = rc;

	return 0;

 fail:
	if (per_cpu(xen_resched_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
	if (per_cpu(xen_callfunc_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
	if (per_cpu(xen_debug_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
	if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
				       NULL);

	return rc;
}
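/*
 * For domU the possible map comes from the hypervisor: a cpu id is
 * possible iff VCPUOP_is_up doesn't reject it.  Dom0 instead filters
 * the maps that native code built from the firmware tables (see
 * xen_filter_cpu_maps below).
 */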
static void __init xen_fill_possible_map(void)
{
	int i, rc;

	if (xen_initial_domain())
		return;

	for (i = 0; i < nr_cpu_ids; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0) {
			num_processors++;
			set_cpu_possible(i, true);
		}
	}
}

static void __init xen_filter_cpu_maps(void)
{
	int i, rc;

	if (!xen_initial_domain())
		return;

	num_processors = 0;
	disabled_cpus = 0;
	for (i = 0; i < nr_cpu_ids; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0) {
			num_processors++;
			set_cpu_possible(i, true);
		} else {
			set_cpu_possible(i, false);
			set_cpu_present(i, false);
		}
	}
}
static void __init xen_smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);
	native_smp_prepare_boot_cpu();

	/* We've switched to the "real" per-cpu gdt, so make sure the
	   old memory can be recycled */
	make_lowmem_page_readwrite(xen_initial_gdt);

	xen_filter_cpu_maps();
	xen_setup_vcpu_info_placement();
}
static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned cpu;
	unsigned int i;

	if (skip_ioapic_setup) {
		char *m = (max_cpus == 0) ?
			"The nosmp parameter is incompatible with Xen; " \
			"use Xen dom0_max_vcpus=1 parameter" :
			"The noapic parameter is incompatible with Xen";

		xen_raw_printk(m);
		panic(m);
	}
	xen_init_lock_cpu(0);

	smp_store_cpu_info(0);
	cpu_data(0).x86_max_cores = 1;

	for_each_possible_cpu(i) {
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
	}
	set_cpu_sibling_map(0);

	if (xen_smp_intr_init(0))
		BUG();

	if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
		panic("could not allocate xen_cpu_initialized_map\n");

	cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

	/* Restrict the possible_map according to max_cpus. */
	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
			continue;
		set_cpu_possible(cpu, false);
	}

	for_each_possible_cpu (cpu) {
		struct task_struct *idle;

		if (cpu == 0)
			continue;

		idle = fork_idle(cpu);
		if (IS_ERR(idle))
			panic("failed fork for CPU %d", cpu);

		set_cpu_present(cpu, true);
	}
}
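/*
 * Describe a secondary cpu's initial state to the hypervisor.  This is
 * roughly the PV analogue of the INIT/SIPI trampoline: instead of
 * real-mode startup code, the registers, GDT, callbacks and cr3 are
 * all handed over in a vcpu_guest_context via VCPUOP_initialise.
 */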
static int __cpuinit
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
	struct vcpu_guest_context *ctxt;
	struct desc_struct *gdt;
	unsigned long gdt_mfn;

	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (ctxt == NULL)
		return -ENOMEM;

	gdt = get_cpu_gdt_table(cpu);

	ctxt->flags = VGCF_IN_KERNEL;
	ctxt->user_regs.ds = __USER_DS;
	ctxt->user_regs.es = __USER_DS;
	ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32
	ctxt->user_regs.fs = __KERNEL_PERCPU;
	ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
#else
	ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */

	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

	xen_copy_trap_info(ctxt->trap_ctxt);

	ctxt->ldt_ents = 0;

	BUG_ON((unsigned long)gdt & ~PAGE_MASK);

	gdt_mfn = arbitrary_virt_to_mfn(gdt);
	make_lowmem_page_readonly(gdt);
	make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

	ctxt->gdt_frames[0] = gdt_mfn;
	ctxt->gdt_ents      = GDT_ENTRIES;

	ctxt->user_regs.cs = __KERNEL_CS;
	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);

	ctxt->kernel_ss = __KERNEL_DS;
	ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
	ctxt->event_callback_cs     = __KERNEL_CS;
	ctxt->failsafe_callback_cs  = __KERNEL_CS;
#endif
	ctxt->event_callback_eip    = (unsigned long)xen_hypervisor_callback;
	ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;

	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
		BUG();

	kfree(ctxt);
	return 0;
}
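/*
 * Bring one secondary cpu online: install its idle task, load its
 * initial context, kick it with VCPUOP_up, then wait for cpu_bringup()
 * on the new cpu to report CPU_ONLINE.
 */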
static int __cpuinit xen_cpu_up(unsigned int cpu)
{
	struct task_struct *idle = idle_task(cpu);
	int rc;

	per_cpu(current_task, cpu) = idle;
#ifdef CONFIG_X86_32
	irq_ctx_init(cpu);
#else
	clear_tsk_thread_flag(idle, TIF_FORK);
	per_cpu(kernel_stack, cpu) =
		(unsigned long)task_stack_page(idle) -
		KERNEL_STACK_OFFSET + THREAD_SIZE;
#endif
	xen_setup_runstate_info(cpu);
	xen_setup_timer(cpu);
	xen_init_lock_cpu(cpu);

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* make sure interrupts start blocked */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	if (num_online_cpus() == 1)
		alternatives_smp_switch(1);

	rc = xen_smp_intr_init(cpu);
	if (rc)
		return rc;

	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
	BUG_ON(rc);

	while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
		HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
		barrier();
	}

	return 0;
}
static void xen_smp_cpus_done(unsigned int max_cpus)
{
}
#ifdef CONFIG_HOTPLUG_CPU
static int xen_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	if (cpu == 0)
		return -EBUSY;

	cpu_disable_common();

	load_cr3(swapper_pg_dir);
	return 0;
}

static void xen_cpu_die(unsigned int cpu)
{
	while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
		current->state = TASK_UNINTERRUPTIBLE;
		schedule_timeout(HZ/10);
	}
	unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
	xen_uninit_lock_cpu(cpu);
	xen_teardown_timer(cpu);

	if (num_online_cpus() == 1)
		alternatives_smp_switch(0);
}
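/*
 * A "dead" vcpu doesn't sit in a halt loop: VCPUOP_down deschedules it
 * in the hypervisor until a later VCPUOP_up, at which point it resumes
 * here and goes back through cpu_bringup().
 */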
static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
{
	play_dead_common();
	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	cpu_bringup();
}

#else /* !CONFIG_HOTPLUG_CPU */
static int xen_cpu_disable(void)
{
	return -ENOSYS;
}

static void xen_cpu_die(unsigned int cpu)
{
	BUG();
}

static void xen_play_dead(void)
{
	BUG();
}

#endif
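/* Shutdown path: each cpu takes itself down with VCPUOP_down. */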
static void stop_self(void *v)
{
	int cpu = smp_processor_id();

	/* make sure we're not pinning something down */
	load_cr3(swapper_pg_dir);
	/* should set up a minimal gdt */

	set_cpu_online(cpu, false);

	HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
	BUG();
}
static void xen_stop_other_cpus(int wait)
{
	smp_call_function(stop_self, NULL, wait);
}

static void xen_smp_send_reschedule(int cpu)
{
	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}
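/*
 * Sending an IPI is just a notification on the event channel that
 * xen_smp_intr_init() bound for that vector; only online cpus are
 * signalled.
 */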
static void xen_send_IPI_mask(const struct cpumask *mask,
			      enum ipi_vector vector)
{
	unsigned cpu;

	for_each_cpu_and(cpu, mask, cpu_online_mask)
		xen_send_IPI_one(cpu, vector);
}

static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
	int cpu;

	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

	/* Make sure other vcpus get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (xen_vcpu_stolen(cpu)) {
			HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
			break;
		}
	}
}

static void xen_smp_send_call_function_single_ipi(int cpu)
{
	xen_send_IPI_mask(cpumask_of(cpu),
			  XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}
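/*
 * PV smp_ops, installed wholesale by xen_smp_init() below before any
 * secondary cpu is started.
 */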
static const struct smp_ops xen_smp_ops __initconst = {
	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
	.smp_prepare_cpus = xen_smp_prepare_cpus,
	.smp_cpus_done = xen_smp_cpus_done,

	.cpu_up = xen_cpu_up,
	.cpu_die = xen_cpu_die,
	.cpu_disable = xen_cpu_disable,
	.play_dead = xen_play_dead,

	.stop_other_cpus = xen_stop_other_cpus,
	.smp_send_reschedule = xen_smp_send_reschedule,

	.send_call_func_ipi = xen_smp_send_call_function_ipi,
	.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};

void __init xen_smp_init(void)
{
	smp_ops = xen_smp_ops;
	xen_fill_possible_map();
	xen_init_spinlocks();
}
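/*
 * HVM guests keep native cpu bring-up and only layer the Xen IPI event
 * channels on top, so xen_hvm_smp_init() overrides just the relevant
 * hooks rather than the whole smp_ops.
 */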
static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
{
	native_smp_prepare_cpus(max_cpus);
	WARN_ON(xen_smp_intr_init(0));

	xen_init_lock_cpu(0);
	xen_init_spinlocks();
}
static int __cpuinit xen_hvm_cpu_up(unsigned int cpu)
{
	int rc;
	rc = native_cpu_up(cpu);
	WARN_ON(xen_smp_intr_init(cpu));
	return rc;
}

static void xen_hvm_cpu_die(unsigned int cpu)
{
	unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
	native_cpu_die(cpu);
}
void __init xen_hvm_smp_init(void)
{
	if (!xen_have_vector_callback)
		return;
	smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
	smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
	smp_ops.cpu_up = xen_hvm_cpu_up;
	smp_ops.cpu_die = xen_hvm_cpu_die;
	smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
	smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
}