~ubuntu-branches/ubuntu/precise/linux-lowlatency/precise


Viewing changes to arch/x86/xen/smp.c

  • Committer: Package Import Robot
  • Author(s): Alessio Igor Bogani
  • Date: 2011-10-26 11:13:05 UTC
  • Revision ID: package-import@ubuntu.com-20111026111305-tz023xykf0i6eosh
Tags: upstream-3.2.0
Import upstream version 3.2.0

 
/*
 * Xen SMP support
 *
 * This file implements the Xen versions of smp_ops.  SMP under Xen is
 * very straightforward.  Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of.  As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 */
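
/*
 * Annotation (not part of the upstream file): "the Xen event mechanism"
 * means each IPI vector below is backed by a per-vcpu event channel
 * rather than a local APIC vector.  The recurring pattern, roughly:
 *
 *	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu,
 *				    xen_reschedule_interrupt,
 *				    IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
 *				    "resched<cpu>", NULL);
 *
 * allocates an event channel for the (vector, cpu) pair, maps it to a
 * Linux irq and attaches the handler; xen_send_IPI_one() at the bottom
 * of the file then signals that channel to deliver the IPI.
 */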
 
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/smp.h>

#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "mmu.h"

cpumask_var_t xen_cpu_initialized_map;

static DEFINE_PER_CPU(int, xen_resched_irq);
static DEFINE_PER_CPU(int, xen_callfunc_irq);
static DEFINE_PER_CPU(int, xen_callfuncsingle_irq);
static DEFINE_PER_CPU(int, xen_debug_irq) = -1;

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
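
/*
 * Annotation (not in the upstream file): the per-cpu variables above
 * record the Linux irq numbers handed back by bind_ipi_to_irqhandler()
 * and bind_virq_to_irqhandler(), so the teardown paths (the fail label
 * in xen_smp_intr_init() and xen_cpu_die()) can unbind them again.
 * Note that only xen_debug_irq starts out at -1; the others default
 * to 0.
 */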
 

/*
 * Reschedule call back.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
        inc_irq_stat(irq_resched_count);
        scheduler_ipi();

        return IRQ_HANDLED;
}

static void __cpuinit cpu_bringup(void)
{
        int cpu = smp_processor_id();

        cpu_init();
        touch_softlockup_watchdog();
        preempt_disable();

        xen_enable_sysenter();
        xen_enable_syscall();

        cpu = smp_processor_id();
        smp_store_cpu_info(cpu);
        cpu_data(cpu).x86_max_cores = 1;
        set_cpu_sibling_map(cpu);

        xen_setup_cpu_clockevents();

        set_cpu_online(cpu, true);
        percpu_write(cpu_state, CPU_ONLINE);
        wmb();

        /* We can take interrupts now: we're officially "up". */
        local_irq_enable();

        wmb();                  /* make sure everything is out */
}

static void __cpuinit cpu_bringup_and_idle(void)
{
        cpu_bringup();
        cpu_idle();
}
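
/*
 * Annotation (not in the upstream file): cpu_bringup_and_idle() is the
 * first code a secondary vcpu ever executes; cpu_initialize_context()
 * below points the new vcpu's instruction pointer (user_regs.eip) at
 * it.  It finishes per-cpu setup, marks the cpu online, and then drops
 * into the idle loop without returning.
 */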
 
 

static int xen_smp_intr_init(unsigned int cpu)
{
        int rc;
        const char *resched_name, *callfunc_name, *debug_name;

        resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
                                    cpu,
                                    xen_reschedule_interrupt,
                                    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                    resched_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_resched_irq, cpu) = rc;

        callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
                                    cpu,
                                    xen_call_function_interrupt,
                                    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_callfunc_irq, cpu) = rc;

        debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
        rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
                                     IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
                                     debug_name, NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_debug_irq, cpu) = rc;

        callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
                                    cpu,
                                    xen_call_function_single_interrupt,
                                    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_callfuncsingle_irq, cpu) = rc;

        return 0;

 fail:
        if (per_cpu(xen_resched_irq, cpu) >= 0)
                unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
        if (per_cpu(xen_callfunc_irq, cpu) >= 0)
                unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
        if (per_cpu(xen_debug_irq, cpu) >= 0)
                unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
        if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
                unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
                                       NULL);

        return rc;
}
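
/*
 * Annotation (not in the upstream file): xen_smp_intr_init() runs for
 * the boot cpu from xen_smp_prepare_cpus() and for each secondary cpu
 * from xen_cpu_up(); the HVM paths at the bottom of the file reuse it
 * as well.  Each binding gets a distinct kasprintf()-allocated name
 * such as "resched0", which is what shows up in /proc/interrupts.
 */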
 
 

static void __init xen_fill_possible_map(void)
{
        int i, rc;

        if (xen_initial_domain())
                return;

        for (i = 0; i < nr_cpu_ids; i++) {
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
                if (rc >= 0) {
                        num_processors++;
                        set_cpu_possible(i, true);
                }
        }
}

static void __init xen_filter_cpu_maps(void)
{
        int i, rc;

        if (!xen_initial_domain())
                return;

        num_processors = 0;
        disabled_cpus = 0;
        for (i = 0; i < nr_cpu_ids; i++) {
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
                if (rc >= 0) {
                        num_processors++;
                        set_cpu_possible(i, true);
                } else {
                        set_cpu_possible(i, false);
                        set_cpu_present(i, false);
                }
        }
}
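
/*
 * Annotation (not in the upstream file): the two helpers above are
 * complementary.  xen_fill_possible_map() builds the possible map from
 * scratch for unprivileged (domU) guests, which have no ACPI tables to
 * enumerate cpus from; xen_filter_cpu_maps() instead prunes the maps
 * dom0 derived from ACPI down to the vcpus the hypervisor actually
 * provides.  Both probe each vcpu with VCPUOP_is_up.
 */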
 
 

static void __init xen_smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != 0);
        native_smp_prepare_boot_cpu();

        /* We've switched to the "real" per-cpu gdt, so make sure the
           old memory can be recycled */
        make_lowmem_page_readwrite(xen_initial_gdt);

        xen_filter_cpu_maps();
        xen_setup_vcpu_info_placement();
}

static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned cpu;
        unsigned int i;

        if (skip_ioapic_setup) {
                char *m = (max_cpus == 0) ?
                        "The nosmp parameter is incompatible with Xen; " \
                        "use Xen dom0_max_vcpus=1 parameter" :
                        "The noapic parameter is incompatible with Xen";

                xen_raw_printk(m);
                panic(m);
        }
        xen_init_lock_cpu(0);

        smp_store_cpu_info(0);
        cpu_data(0).x86_max_cores = 1;

        for_each_possible_cpu(i) {
                zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
                zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
                zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
        }
        set_cpu_sibling_map(0);

        if (xen_smp_intr_init(0))
                BUG();

        if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
                panic("could not allocate xen_cpu_initialized_map\n");

        cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

        /* Restrict the possible_map according to max_cpus. */
        while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
                for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
                        continue;
                set_cpu_possible(cpu, false);
        }

        for_each_possible_cpu (cpu) {
                struct task_struct *idle;

                if (cpu == 0)
                        continue;

                idle = fork_idle(cpu);
                if (IS_ERR(idle))
                        panic("failed fork for CPU %d", cpu);

                set_cpu_present(cpu, true);
        }
}
 
 

static int __cpuinit
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
        struct vcpu_guest_context *ctxt;
        struct desc_struct *gdt;
        unsigned long gdt_mfn;

        if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
                return 0;

        ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
        if (ctxt == NULL)
                return -ENOMEM;

        gdt = get_cpu_gdt_table(cpu);

        ctxt->flags = VGCF_IN_KERNEL;
        ctxt->user_regs.ds = __USER_DS;
        ctxt->user_regs.es = __USER_DS;
        ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32
        ctxt->user_regs.fs = __KERNEL_PERCPU;
        ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
#else
        ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
        ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
        ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */

        memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

        xen_copy_trap_info(ctxt->trap_ctxt);

        ctxt->ldt_ents = 0;

        BUG_ON((unsigned long)gdt & ~PAGE_MASK);

        gdt_mfn = arbitrary_virt_to_mfn(gdt);
        make_lowmem_page_readonly(gdt);
        make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

        ctxt->gdt_frames[0] = gdt_mfn;
        ctxt->gdt_ents      = GDT_ENTRIES;

        ctxt->user_regs.cs = __KERNEL_CS;
        ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);

        ctxt->kernel_ss = __KERNEL_DS;
        ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
        ctxt->event_callback_cs     = __KERNEL_CS;
        ctxt->failsafe_callback_cs  = __KERNEL_CS;
#endif
        ctxt->event_callback_eip    = (unsigned long)xen_hypervisor_callback;
        ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;

        per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
        ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

        if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
                BUG();

        kfree(ctxt);
        return 0;
}
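
/*
 * Annotation (not in the upstream file): cpu_initialize_context()
 * hand-builds the register state a native cpu would get from the boot
 * trampoline: entry point at cpu_bringup_and_idle(), stack at the top
 * of the idle task, and the per-cpu GDT registered by machine frame
 * number.  The GDT pages are mapped read-only first, since Xen will
 * not accept a writable GDT; hence the make_lowmem_page_readonly()
 * calls.  VCPUOP_initialise loads this context into the new vcpu,
 * which then only needs VCPUOP_up to start running.
 */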
 
 

static int __cpuinit xen_cpu_up(unsigned int cpu)
{
        struct task_struct *idle = idle_task(cpu);
        int rc;

        per_cpu(current_task, cpu) = idle;
#ifdef CONFIG_X86_32
        irq_ctx_init(cpu);
#else
        clear_tsk_thread_flag(idle, TIF_FORK);
        per_cpu(kernel_stack, cpu) =
                (unsigned long)task_stack_page(idle) -
                KERNEL_STACK_OFFSET + THREAD_SIZE;
#endif
        xen_setup_runstate_info(cpu);
        xen_setup_timer(cpu);
        xen_init_lock_cpu(cpu);

        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

        /* make sure interrupts start blocked */
        per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

        rc = cpu_initialize_context(cpu, idle);
        if (rc)
                return rc;

        if (num_online_cpus() == 1)
                alternatives_smp_switch(1);

        rc = xen_smp_intr_init(cpu);
        if (rc)
                return rc;

        rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
        BUG_ON(rc);

        while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
                HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
                barrier();
        }

        return 0;
}
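
/*
 * Annotation (not in the upstream file): the bring-up handshake is the
 * pair VCPUOP_up above and percpu_write(cpu_state, CPU_ONLINE) in
 * cpu_bringup().  The boot cpu yields back to the hypervisor until the
 * new vcpu has flipped cpu_state, rather than busy-spinning on a
 * physical cpu it may be sharing with the very vcpu it is waiting for.
 */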
 
 

static void xen_smp_cpus_done(unsigned int max_cpus)
{
}

#ifdef CONFIG_HOTPLUG_CPU
static int xen_cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        if (cpu == 0)
                return -EBUSY;

        cpu_disable_common();

        load_cr3(swapper_pg_dir);
        return 0;
}

static void xen_cpu_die(unsigned int cpu)
{
        while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
                current->state = TASK_UNINTERRUPTIBLE;
                schedule_timeout(HZ/10);
        }
        unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
        unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
        unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
        unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
        xen_uninit_lock_cpu(cpu);
        xen_teardown_timer(cpu);

        if (num_online_cpus() == 1)
                alternatives_smp_switch(0);
}

static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
{
        play_dead_common();
        HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
        cpu_bringup();
}

#else /* !CONFIG_HOTPLUG_CPU */
static int xen_cpu_disable(void)
{
        return -ENOSYS;
}

static void xen_cpu_die(unsigned int cpu)
{
        BUG();
}

static void xen_play_dead(void)
{
        BUG();
}

#endif
static void stop_self(void *v)
{
        int cpu = smp_processor_id();

        /* make sure we're not pinning something down */
        load_cr3(swapper_pg_dir);
        /* should set up a minimal gdt */

        set_cpu_online(cpu, false);

        HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
        BUG();
}

static void xen_stop_other_cpus(int wait)
{
        smp_call_function(stop_self, NULL, wait);
}

static void xen_smp_send_reschedule(int cpu)
{
        xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

static void xen_send_IPI_mask(const struct cpumask *mask,
                              enum ipi_vector vector)
{
        unsigned cpu;

        for_each_cpu_and(cpu, mask, cpu_online_mask)
                xen_send_IPI_one(cpu, vector);
}

static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
        int cpu;

        xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

        /* Make sure other vcpus get a chance to run if they need to. */
        for_each_cpu(cpu, mask) {
                if (xen_vcpu_stolen(cpu)) {
                        HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
                        break;
                }
        }
}

static void xen_smp_send_call_function_single_ipi(int cpu)
{
        xen_send_IPI_mask(cpumask_of(cpu),
                          XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
        irq_enter();
        generic_smp_call_function_interrupt();
        inc_irq_stat(irq_call_count);
        irq_exit();

        return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
        irq_enter();
        generic_smp_call_function_single_interrupt();
        inc_irq_stat(irq_call_count);
        irq_exit();

        return IRQ_HANDLED;
}
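
/*
 * Annotation (not in the upstream file): xen_send_IPI_mask() quietly
 * skips cpus that are not online, and the call-function sender yields
 * to the hypervisor if any target vcpu currently has its physical cpu
 * stolen, so the recipient actually gets scheduled to handle the
 * event.  The receive-side handlers simply defer to the generic
 * smp_call_function machinery.
 */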
 
 

static const struct smp_ops xen_smp_ops __initconst = {
        .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
        .smp_prepare_cpus = xen_smp_prepare_cpus,
        .smp_cpus_done = xen_smp_cpus_done,

        .cpu_up = xen_cpu_up,
        .cpu_die = xen_cpu_die,
        .cpu_disable = xen_cpu_disable,
        .play_dead = xen_play_dead,

        .stop_other_cpus = xen_stop_other_cpus,
        .smp_send_reschedule = xen_smp_send_reschedule,

        .send_call_func_ipi = xen_smp_send_call_function_ipi,
        .send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};

void __init xen_smp_init(void)
{
        smp_ops = xen_smp_ops;
        xen_fill_possible_map();
        xen_init_spinlocks();
}
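
/*
 * Annotation (not in the upstream file): for PV guests xen_smp_init()
 * replaces the whole smp_ops structure, overriding every native SMP
 * hook.  The HVM variant below instead patches individual hooks on top
 * of the native implementations, and only when the hypervisor can
 * deliver event-channel upcalls as vector callbacks.
 */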
 
 

static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
{
        native_smp_prepare_cpus(max_cpus);
        WARN_ON(xen_smp_intr_init(0));

        xen_init_lock_cpu(0);
}

static int __cpuinit xen_hvm_cpu_up(unsigned int cpu)
{
        int rc;
        rc = native_cpu_up(cpu);
        WARN_ON(xen_smp_intr_init(cpu));
        return rc;
}

static void xen_hvm_cpu_die(unsigned int cpu)
{
        unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
        unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
        unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
        unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
        native_cpu_die(cpu);
}

void __init xen_hvm_smp_init(void)
{
        if (!xen_have_vector_callback)
                return;
        smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
        smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
        smp_ops.cpu_up = xen_hvm_cpu_up;
        smp_ops.cpu_die = xen_hvm_cpu_die;
        smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
        smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
}