/*
 * Hypercall and fault low-level handling routines.
 *
 * Copyright (c) 2002-2004, K A Fraser
 * Copyright (c) 1991, 1992 Linus Torvalds
 *
 * Calling back to a guest OS:
 * ===========================
 *
 * First, we require that all callbacks (either via a supplied
 * interrupt-descriptor-table, or via the special event or failsafe callbacks
 * in the shared-info-structure) are to ring 1. This just makes life easier,
 * in that it means we don't have to do messy GDT/LDT lookups to find
 * out the privilege level of the return code-selector. That code
 * would just be a hassle to write, and would need to account for running
 * off the end of the GDT/LDT, for example. For all callbacks we check
 * that the provided return CS is not == __HYPERVISOR_{CS,DS}. Apart from that
 * we're safe as we don't allow a guest OS to install ring-0 privileges into
 * the GDT/LDT. It's up to the guest OS to ensure all returns via the IDT are
 * to ring 1. If not, we load incorrect SS/ESP values from the TSS (for ring 1
 * rather than the correct ring) and bad things are bound to ensue -- IRET is
 * likely to fault, and we may end up killing the domain (no harm can
 * come to Xen, though).
 *
 * When doing a callback, we check if the return CS is in ring 0. If so,
 * the callback is delayed until the next return to ring != 0.
 * If the return CS is in ring 1, then we create a callback frame
 * starting at return SS/ESP. The base of the frame does an intra-privilege
 * interrupt-return.
 * If the return CS is in ring > 1, we create a callback frame starting
 * at SS/ESP taken from the appropriate section of the current TSS. The base
 * of the frame does an inter-privilege interrupt-return.
 *
 * Note that the "failsafe callback" uses a special stackframe:
 *   { return_DS, return_ES, return_FS, return_GS, return_EIP,
 *     return_CS, return_EFLAGS[, return_ESP, return_SS] }
 * That is, the original values for DS/ES/FS/GS are placed on the stack rather
 * than in DS/ES/FS/GS themselves. Why? It saves us loading them, only to have
 * them saved/restored in the guest OS. Furthermore, if we load them we may
 * cause a fault if they are invalid, which is a hassle to deal with. We avoid
 * that problem if we don't load them :-) This property allows us to use
 * the failsafe callback as a fallback: if we ever fault on loading DS/ES/FS/GS
 * on return to ring != 0, we can simply package it up as a return via
 * the failsafe callback, and let the guest OS sort it out (perhaps by
 * killing an application process). Note that we also do this for any
 * faulting IRET -- just let the guest OS handle it via the event
 * callback.
 *
 * We terminate a domain in the following cases:
 *  - creating a callback stack frame (due to a bad ring-1 stack).
 *  - faulting IRET on entry to the failsafe callback handler.
 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
 * handler in good order (absolutely no faults allowed!).
 */
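/*
 * For illustration only (not part of the ABI headers): reading the failsafe
 * frame described above from low to high addresses, a guest sees roughly
 * the following C sketch. The struct and field names are hypothetical, and
 * the esp/ss pair exists only for inter-privilege returns:
 *
 *     struct failsafe_frame {
 *         uint32_t ds, es, fs, gs;   // selectors saved on the stack
 *         uint32_t eip, cs, eflags;  // standard interrupt-return triple
 *         uint32_t esp, ss;          // only when switching stacks
 *     };
 */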
#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <public/xen.h>
#define GET_GUEST_REGS(reg)                     \
        movl $~(STACK_SIZE-1),reg;              \
        andl %esp,reg;                          \
        orl  $(STACK_SIZE-CPUINFO_sizeof),reg;

#define GET_CURRENT(reg)                        \
        movl $STACK_SIZE-4, reg;                \
        orl  %esp, reg;                         \
        andl $~3,reg;                           \
        movl (reg),reg;
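/*
 * Both macros exploit the STACK_SIZE alignment of each per-CPU stack:
 * masking %esp yields the stack base, the saved guest registers live at a
 * fixed offset (STACK_SIZE-CPUINFO_sizeof) from it, and the final word of
 * the stack holds the 'current' vcpu pointer, so each can be reached with
 * pure register arithmetic plus at most one load.
 */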
restore_all_guest:
        ASSERT_INTERRUPTS_DISABLED
        testl $X86_EFLAGS_VM,UREGS_eflags(%esp)
        popl  %ebx
        popl  %ecx
        popl  %edx
        popl  %esi
        popl  %edi
        popl  %ebp
        popl  %eax
        leal  4(%esp),%esp
        jnz   .Lrestore_iret_guest
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
        testb $2,UREGS_cs-UREGS_eip(%esp)
        jnz   .Lrestore_sregs_guest
        call  restore_ring0_guest
        jmp   .Lrestore_iret_guest
#endif
.Lrestore_sregs_guest:
.Lft1:  mov   UREGS_ds-UREGS_eip(%esp),%ds
.Lft2:  mov   UREGS_es-UREGS_eip(%esp),%es
.Lft3:  mov   UREGS_fs-UREGS_eip(%esp),%fs
.Lft4:  mov   UREGS_gs-UREGS_eip(%esp),%gs
.Lrestore_iret_guest:
.Lft5:  iret
        /*
         * Fixup path, entered if one of the .LftN selector loads above
         * faults: build an exception frame on the Xen stack and re-enter
         * via handle_exception.
         */
.Lfx1:  sti
        mov   UREGS_error_code(%esp),%esi
        pushfl                          # EFLAGS
        movl  $__HYPERVISOR_CS,%eax
        pushl %eax                      # CS
        movl  $.Ldf1,%eax
        pushl %eax                      # EIP
        pushl %esi                      # error_code/entry_vector
        jmp   handle_exception
.Ldf1:  GET_CURRENT(%ebx)
        jmp   test_all_events

failsafe_callback:
        GET_CURRENT(%ebx)
        leal  VCPU_trap_bounce(%ebx),%edx
        movl  VCPU_failsafe_addr(%ebx),%eax
        movl  %eax,TRAPBOUNCE_eip(%edx)
        movl  VCPU_failsafe_sel(%ebx),%eax
        movw  %ax,TRAPBOUNCE_cs(%edx)
        movb  $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
        bt    $_VGCF_failsafe_disables_events,VCPU_guest_context_flags(%ebx)
        jnc   1f
        orb   $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
1:      call  create_bounce_frame
        /* The original DS-GS now sit on the failsafe frame: zero them in
         * the outgoing register block rather than risk reloading them. */
        xorl  %eax,%eax
        movl  %eax,UREGS_ds(%esp)
        movl  %eax,UREGS_es(%esp)
        movl  %eax,UREGS_fs(%esp)
        movl  %eax,UREGS_gs(%esp)
        jmp   test_all_events
.section __pre_ex_table,"a"
        .long .Lft1,.Lfx1
        .long .Lft2,.Lfx1
        .long .Lft3,.Lfx1
        .long .Lft4,.Lfx1
.previous

.section __ex_table,"a"
        .long .Ldf1,failsafe_callback
.previous
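/*
 * __pre_ex_table entries are consulted before a fault is processed further
 * (see search_pre_exception_table below), while __ex_table entries map a
 * faulting instruction directly to its recovery code -- here a fault at
 * .Ldf1 re-enters through the failsafe callback delivery path.
 */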
ENTRY(hypercall)
        subl  $4,%esp
        FIXUP_RING0_GUEST_STACK
        SAVE_ALL(1f,1f)
1:      sti
        GET_CURRENT(%ebx)
        cmpl  $NR_hypercalls,%eax
        jae   bad_hypercall
        PERFC_INCR(PERFC_hypercalls, %eax, %ebx)
#ifndef NDEBUG
        /* Create shadow parameters and corrupt those not used by this call. */
        pushl %eax
        pushl UREGS_eip+4(%esp)
        pushl 28(%esp) # EBP
        pushl 28(%esp) # EDI
        pushl 28(%esp) # ESI
        pushl 28(%esp) # EDX
        pushl 28(%esp) # ECX
        pushl 28(%esp) # EBX
        movzb hypercall_args_table(,%eax,1),%ecx
        leal  (%esp,%ecx,4),%edi
        subl  $6,%ecx
        negl  %ecx
        movl  %eax,%esi
        movl  $0xDEADBEEF,%eax
        rep   stosl
        movl  %esi,%eax
#define SHADOW_BYTES 32 /* 6 shadow parameters + EIP + hypercall # */
#else
        /*
         * We need shadow parameters even on non-debug builds. We depend on the
         * original versions not being clobbered (needed to create a hypercall
         * continuation). But that isn't guaranteed by the function-call ABI.
         */
        pushl 20(%esp) # EBP
        pushl 20(%esp) # EDI
        pushl 20(%esp) # ESI
        pushl 20(%esp) # EDX
        pushl 20(%esp) # ECX
        pushl 20(%esp) # EBX
#define SHADOW_BYTES 24 /* 6 shadow parameters */
#endif
        cmpb  $0,tb_init_done
        je    1f
        call  trace_hypercall
        /* Now restore all the registers that trace_hypercall clobbered */
        movl  UREGS_eax+SHADOW_BYTES(%esp),%eax /* Hypercall # */
#undef SHADOW_BYTES
1:      call  *hypercall_table(,%eax,4)
        addl  $24,%esp     # Discard the shadow parameters
#ifndef NDEBUG
        /* Deliberately corrupt real parameter regs used by this hypercall. */
        popl  %ecx         # Shadow EIP
        cmpl  %ecx,UREGS_eip+4(%esp)
        popl  %ecx         # Shadow hypercall index
        jne   skip_clobber # If EIP has changed then don't clobber
        movzb hypercall_args_table(,%ecx,1),%ecx
        movl  %esp,%edi
        movl  %eax,%esi
        movl  $0xDEADBEEF,%eax
        rep   stosl
        movl  %esi,%eax
skip_clobber:
#endif
        movl  %eax,UREGS_eax(%esp) # save the return value
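        /*
         * Note: the clobbering above is skipped when the handler changed
         * the guest EIP, i.e. when a hypercall continuation was created;
         * the original parameter registers must then survive so that the
         * hypercall can be restarted.
         */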
test_all_events:
        xorl  %ecx,%ecx
        notl  %ecx
        cli                             # tests must not race interrupts
/*test_softirqs:*/
        movl  VCPU_processor(%ebx),%eax
        shl   $IRQSTAT_shift,%eax
        test  %ecx,irq_stat(%eax,1)
        jnz   process_softirqs
        testb $1,VCPU_mce_pending(%ebx)
        jnz   process_mce
        testb $1,VCPU_nmi_pending(%ebx)
        jnz   process_nmi
test_guest_events:
        movl  VCPU_vcpu_info(%ebx),%eax
        testb $0xFF,VCPUINFO_upcall_mask(%eax)
        jnz   restore_all_guest
        testb $0xFF,VCPUINFO_upcall_pending(%eax)
        jz    restore_all_guest
/*process_guest_events:*/
        sti
        leal  VCPU_trap_bounce(%ebx),%edx
        movl  VCPU_event_addr(%ebx),%eax
        movl  %eax,TRAPBOUNCE_eip(%edx)
        movl  VCPU_event_sel(%ebx),%eax
        movw  %ax,TRAPBOUNCE_cs(%edx)
        movb  $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
        call  create_bounce_frame
        jmp   test_all_events
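/*
 * MCE/NMI delivery below is gated on VCPU_trap_priority so that a new NMI
 * or machine check is not injected while the guest is still handling one of
 * equal or higher priority; the old priority is restored by the guest's
 * iret hypercall.
 */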
/* %ebx: struct vcpu */
process_mce:
        cmpw  $VCPU_TRAP_MCE,VCPU_trap_priority(%ebx)
        jae   test_guest_events
        sti
        movb  $0,VCPU_mce_pending(%ebx)
        call  set_guest_machinecheck_trapbounce
        test  %eax,%eax
        jz    test_all_events
        movw  VCPU_trap_priority(%ebx),%dx           # safe priority for the
        movw  %dx,VCPU_old_trap_priority(%ebx)       # iret hypercall
        movw  $VCPU_TRAP_MCE,VCPU_trap_priority(%ebx)
        jmp   process_trap

/* %ebx: struct vcpu */
process_nmi:
        cmpw  $VCPU_TRAP_NMI,VCPU_trap_priority(%ebx)
        jae   test_guest_events
        sti
        movb  $0,VCPU_nmi_pending(%ebx)
        call  set_guest_nmi_trapbounce
        test  %eax,%eax
        jz    test_all_events
        movw  VCPU_trap_priority(%ebx),%dx           # safe priority for the
        movw  %dx,VCPU_old_trap_priority(%ebx)       # iret hypercall
        movw  $VCPU_TRAP_NMI,VCPU_trap_priority(%ebx)
        /* FALLTHROUGH */
process_trap:
        leal  VCPU_trap_bounce(%ebx),%edx
        call  create_bounce_frame
        jmp   test_all_events
bad_hypercall:
        movl  $-ENOSYS,UREGS_eax(%esp)
        jmp   test_all_events
/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:            */
/*   {EIP, CS, EFLAGS, [ESP, SS]}                                        */
/* %edx == trap_bounce, %ebx == struct vcpu                              */
/* %eax,%ecx are clobbered. %gs:%esi contain new UREGS_ss/UREGS_esp.     */
create_bounce_frame:
        ASSERT_INTERRUPTS_ENABLED
        movl  UREGS_eflags+4(%esp),%ecx
        movb  UREGS_cs+4(%esp),%cl
        testl $(2|X86_EFLAGS_VM),%ecx
        jz    ring1 /* jump if returning to an existing ring-1 activation */
        movl  VCPU_kernel_sp(%ebx),%esi
.Lft6:  mov   VCPU_kernel_ss(%ebx),%gs
        testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
        jz    .Lnvm86_1
        subl  $16,%esi       /* push ES/DS/FS/GS (VM86 stack frame) */
        movl  UREGS_es+4(%esp),%eax
.Lft7:  movl  %eax,%gs:(%esi)
        movl  UREGS_ds+4(%esp),%eax
.Lft8:  movl  %eax,%gs:4(%esi)
        movl  UREGS_fs+4(%esp),%eax
.Lft9:  movl  %eax,%gs:8(%esi)
        movl  UREGS_gs+4(%esp),%eax
.Lft10: movl  %eax,%gs:12(%esi)
.Lnvm86_1:
        subl  $8,%esi        /* push SS/ESP (inter-priv iret) */
        movl  UREGS_esp+4(%esp),%eax
.Lft11: movl  %eax,%gs:(%esi)
        movl  UREGS_ss+4(%esp),%eax
.Lft12: movl  %eax,%gs:4(%esi)
        jmp   1f
ring1:  /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
        movl  UREGS_esp+4(%esp),%esi
.Lft13: mov   UREGS_ss+4(%esp),%gs
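        /*
         * At this point %gs:%esi is the guest stack on which the bounce
         * frame is built: a fresh ring-1 stack (VCPU_kernel_ss/_sp) when
         * the guest was interrupted in ring > 1 or in VM86 mode, or the
         * interrupted ring-1 activation's own stack otherwise.
         */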
1:      /* Construct a stack frame: EFLAGS, CS/EIP */
        movb  TRAPBOUNCE_flags(%edx),%cl
        subl  $12,%esi
        movl  UREGS_eip+4(%esp),%eax
.Lft14: movl  %eax,%gs:(%esi)
        movl  VCPU_vcpu_info(%ebx),%eax
        pushl VCPUINFO_upcall_mask(%eax)
        testb $TBF_INTERRUPT,%cl
        setnz %ch                        # TBF_INTERRUPT -> set upcall mask
        orb   %ch,VCPUINFO_upcall_mask(%eax)
        popl  %eax
        shll  $16,%eax                   # Bits 16-23: saved_upcall_mask
        movw  UREGS_cs+4(%esp),%ax       # Bits  0-15: CS
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
        testw $2,%ax
        jnz   .Lnvm86_2
        and   $~3,%ax                    # RPL 1 -> RPL 0
.Lnvm86_2:
#endif
.Lft15: movl  %eax,%gs:4(%esi)
        test  $0x00FF0000,%eax           # Bits 16-23: saved_upcall_mask
        setz  %ch                        # %ch == !saved_upcall_mask
        movl  UREGS_eflags+4(%esp),%eax
        andl  $~X86_EFLAGS_IF,%eax
        shlb  $1,%ch                     # Bit 9 (EFLAGS.IF)
        orb   %ch,%ah                    # Fold EFLAGS.IF into %eax
.Lft16: movl  %eax,%gs:8(%esi)
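        /*
         * The EFLAGS image pushed to the guest does not carry the real IF:
         * bit 9 is instead derived from the saved upcall mask, which is
         * what implements the guest's virtual interrupt flag.
         */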
        test  $TBF_EXCEPTION_ERRCODE,%cl
        jz    1f
        subl  $4,%esi                    # push error_code onto guest frame
        movl  TRAPBOUNCE_error_code(%edx),%eax
.Lft17: movl  %eax,%gs:(%esi)
1:      testb $TBF_FAILSAFE,%cl
        jz    2f
        subl  $16,%esi                   # add DS/ES/FS/GS to failsafe stack frame
        testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
        jz    .Lnvm86_3
        xorl  %eax,%eax                  # VM86: we write zero selector values
.Lft18: movl  %eax,%gs:(%esi)
.Lft19: movl  %eax,%gs:4(%esi)
.Lft20: movl  %eax,%gs:8(%esi)
.Lft21: movl  %eax,%gs:12(%esi)
        jmp   2f
.Lnvm86_3:
        movl  UREGS_ds+4(%esp),%eax      # non-VM86: write real selector values
.Lft22: movl  %eax,%gs:(%esi)
        movl  UREGS_es+4(%esp),%eax
.Lft23: movl  %eax,%gs:4(%esi)
        movl  UREGS_fs+4(%esp),%eax
.Lft24: movl  %eax,%gs:8(%esi)
        movl  UREGS_gs+4(%esp),%eax
.Lft25: movl  %eax,%gs:12(%esi)
2:      testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
        jz    .Lnvm86_4
        xorl  %eax,%eax      /* zero DS-GS, just as a real CPU would */
        movl  %eax,UREGS_ds+4(%esp)
        movl  %eax,UREGS_es+4(%esp)
        movl  %eax,UREGS_fs+4(%esp)
        movl  %eax,UREGS_gs+4(%esp)
.Lnvm86_4:
        /* Rewrite our stack frame and return to ring 1. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        andl  $~(X86_EFLAGS_VM|X86_EFLAGS_RF|\
                 X86_EFLAGS_NT|X86_EFLAGS_TF),UREGS_eflags+4(%esp)
        mov   %gs,UREGS_ss+4(%esp)
        movl  %esi,UREGS_esp+4(%esp)
        movzwl TRAPBOUNCE_cs(%edx),%eax
        /* Null selectors (0-3) are not allowed. */
        testl $~3,%eax
        jz    domain_crash_synchronous
        movl  %eax,UREGS_cs+4(%esp)
        movl  TRAPBOUNCE_eip(%edx),%eax
        movl  %eax,UREGS_eip+4(%esp)
        ret
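/*
 * Any fault while writing the bounce frame (the .Lft6-.Lft25 accesses
 * above) indicates a corrupt guest kernel stack, and is punished by a
 * synchronous domain crash via the fixup entries below.
 */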
.section __ex_table,"a"
        .long .Lft6,domain_crash_synchronous ,  .Lft7,domain_crash_synchronous
        .long .Lft8,domain_crash_synchronous ,  .Lft9,domain_crash_synchronous
        .long .Lft10,domain_crash_synchronous , .Lft11,domain_crash_synchronous
        .long .Lft12,domain_crash_synchronous , .Lft13,domain_crash_synchronous
        .long .Lft14,domain_crash_synchronous , .Lft15,domain_crash_synchronous
        .long .Lft16,domain_crash_synchronous , .Lft17,domain_crash_synchronous
        .long .Lft18,domain_crash_synchronous , .Lft19,domain_crash_synchronous
        .long .Lft20,domain_crash_synchronous , .Lft21,domain_crash_synchronous
        .long .Lft22,domain_crash_synchronous , .Lft23,domain_crash_synchronous
        .long .Lft24,domain_crash_synchronous , .Lft25,domain_crash_synchronous
domain_crash_synchronous_string:
        .asciz "domain_crash_sync called from entry.S (%lx)\n"

domain_crash_synchronous:
        pushl $domain_crash_synchronous_string
        call  printk
        jmp   __domain_crash_synchronous
ENTRY(ret_from_intr)
        GET_CURRENT(%ebx)
        movl  UREGS_eflags(%esp),%eax
        movb  UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jnz   test_all_events
        jmp   restore_all_xen

ENTRY(divide_error)
        pushl $TRAP_divide_error<<16
handle_exception:
        FIXUP_RING0_GUEST_STACK
        SAVE_ALL(1f,2f)
1:      /* Exception within Xen: make sure we have valid %ds,%es. */
        mov   %ecx,%ds
        mov   %ecx,%es
2:      testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%esp)
        jz    exception_with_ints_disabled
        sti                             # re-enable interrupts
        xorl  %eax,%eax
        movw  UREGS_entry_vector(%esp),%ax
        movl  %esp,%edx
        pushl %edx                      # push the cpu_user_regs pointer
        GET_CURRENT(%ebx)
        PERFC_INCR(PERFC_exceptions, %eax, %ebx)
        call  *exception_table(,%eax,4)
        addl  $4,%esp
        movl  UREGS_eflags(%esp),%eax
        movb  UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jz    restore_all_xen
        leal  VCPU_trap_bounce(%ebx),%edx
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%edx)
        jz    test_all_events
        call  create_bounce_frame
        movb  $0,TRAPBOUNCE_flags(%edx)
        jmp   test_all_events
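/*
 * Exceptions that arrive with interrupts disabled can only be legitimate
 * inside Xen itself (e.g. within a guest-access critical region): either a
 * pre-exception fixup entry covers the faulting EIP, or the condition is
 * fatal.
 */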
exception_with_ints_disabled:
        movl  UREGS_eflags(%esp),%eax
        movb  UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax   # interrupts disabled outside Xen?
        jnz   FATAL_exception_with_ints_disabled
        pushl %esp
        call  search_pre_exception_table
        addl  $4,%esp
        testl %eax,%eax                 # no fixup code for faulting EIP?
        jz    FATAL_exception_with_ints_disabled
        movl  %eax,UREGS_eip(%esp)
        movl  %esp,%esi
        subl  $4,%esp
        movl  %esp,%edi
        movl  $UREGS_kernel_sizeof/4,%ecx
        rep;  movsl                     # make room for error_code/entry_vector
        movl  UREGS_error_code(%esp),%eax # error_code/entry_vector
        movl  %eax,UREGS_kernel_sizeof(%esp)
        jmp   restore_all_xen           # return to fixup code
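        /*
         * The rep movsl above shifts the saved register block down one
         * word, so that restore_all_xen unwinds straight into the fixup
         * code found by search_pre_exception_table, with the original
         * error_code/entry_vector preserved above the copied frame.
         */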
FATAL_exception_with_ints_disabled:
        xorl  %esi,%esi
        movw  UREGS_entry_vector(%esp),%si
        movl  %esp,%edx
        pushl %edx                      # push the cpu_user_regs pointer
        pushl %esi                      # push the trapnr (entry vector)
        call  fatal_trap
        ud2
ENTRY(coprocessor_error)
        pushl $TRAP_copro_error<<16
        jmp   handle_exception

ENTRY(simd_coprocessor_error)
        pushl $TRAP_simd_error<<16
        jmp   handle_exception

ENTRY(device_not_available)
        pushl $TRAP_no_device<<16
        jmp   handle_exception

ENTRY(debug)
        pushl $TRAP_debug<<16
        jmp   handle_exception

ENTRY(overflow)
        pushl $TRAP_overflow<<16
        jmp   handle_exception

ENTRY(bounds)
        pushl $TRAP_bounds<<16
        jmp   handle_exception

ENTRY(invalid_op)
        pushl $TRAP_invalid_op<<16
        jmp   handle_exception

ENTRY(coprocessor_segment_overrun)
        pushl $TRAP_copro_seg<<16
        jmp   handle_exception

ENTRY(invalid_TSS)
        movw  $TRAP_invalid_tss,2(%esp)
        jmp   handle_exception

ENTRY(segment_not_present)
        movw  $TRAP_no_segment,2(%esp)
        jmp   handle_exception

ENTRY(stack_segment)
        movw  $TRAP_stack_error,2(%esp)
        jmp   handle_exception

ENTRY(general_protection)
        movw  $TRAP_gp_fault,2(%esp)
        jmp   handle_exception

ENTRY(alignment_check)
        movw  $TRAP_alignment_check,2(%esp)
        jmp   handle_exception

ENTRY(page_fault)
        movw  $TRAP_page_fault,2(%esp)
        jmp   handle_exception

ENTRY(spurious_interrupt_bug)
        pushl $TRAP_spurious_int<<16
        jmp   handle_exception
ENTRY(early_page_fault)
        SAVE_ALL(1f,1f)
1:      movl  %esp,%eax
        pushl %eax
        call  do_early_page_fault
        addl  $4,%esp
        jmp   restore_all_xen
handle_nmi_mce:
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
        # NMI/MCE entry protocol is incompatible with guest kernel in ring 0.
        addl  $4,%esp
        iret
#else
        # Save state but do not trash the segment registers!
        SAVE_ALL(.Lnmi_mce_xen,.Lnmi_mce_common)
.Lnmi_mce_common:
        xorl  %eax,%eax
        movw  UREGS_entry_vector(%esp),%ax
        movl  %esp,%edx
        pushl %edx
        call  *exception_table(,%eax,4)
        addl  $4,%esp
        /*
         * NB. We may return to Xen context with polluted %ds/%es. But in such
         * cases we have put guest DS/ES on the guest stack frame, which will
         * be detected by SAVE_ALL(), or we have rolled back restore_guest.
         */
        jmp   ret_from_intr
.Lnmi_mce_xen:
        /* Check the outer (guest) context for %ds/%es state validity. */
        GET_GUEST_REGS(%ebx)
        testl $X86_EFLAGS_VM,%ss:UREGS_eflags(%ebx)
        mov   %ds,%eax
        mov   %es,%edx
        jnz   .Lnmi_mce_vm86
        /* We may have interrupted Xen while messing with %ds/%es... */
        cmpw  %ax,%cx
        mov   %ecx,%ds              /* Ensure %ds is valid */
        cmove UREGS_ds(%ebx),%eax   /* Grab guest DS if it wasn't in %ds */
        cmpw  %dx,%cx
        movl  %eax,UREGS_ds(%ebx)   /* Ensure guest frame contains guest DS */
        cmove UREGS_es(%ebx),%edx   /* Grab guest ES if it wasn't in %es */
        mov   %ecx,%es              /* Ensure %es is valid */
        movl  $.Lrestore_sregs_guest,%ecx
        movl  %edx,UREGS_es(%ebx)   /* Ensure guest frame contains guest ES */
        cmpl  %ecx,UREGS_eip(%esp)
        jb    .Lnmi_mce_common
        cmpl  $.Lrestore_iret_guest,UREGS_eip(%esp)
        ja    .Lnmi_mce_common
        /* Roll outer context restore_guest back to restoring %ds/%es. */
        movl  %ecx,UREGS_eip(%esp)
        jmp   .Lnmi_mce_common
.Lnmi_mce_vm86:
        /* vm86 is easy: the CPU saved %ds/%es so we can safely stomp them. */
        mov   %ecx,%ds
        mov   %ecx,%es
        jmp   .Lnmi_mce_common
#endif /* !CONFIG_X86_SUPERVISOR_MODE_KERNEL */

ENTRY(nmi)
        pushl $TRAP_nmi<<16
        jmp   handle_nmi_mce

ENTRY(machine_check)
        pushl $TRAP_machine_check<<16
        jmp   handle_nmi_mce
ENTRY(setup_vm86_frame)
        # Copies the entire stack frame forwards by 16 bytes.
        .macro copy_vm86_words count=18
        .if \count
        pushl ((\count-1)*4)(%esp)
        popl  ((\count-1)*4)+16(%esp)
        copy_vm86_words "(\count-1)"
        .endif
        .endm
        copy_vm86_words
        addl  $16,%esp
        ret
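        # The macro above expands recursively at assembly time: each level
        # moves one dword of the 18-word vm86 frame 16 bytes up the stack,
        # and the .if guard terminates the recursion once \count reaches
        # zero.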
ENTRY(exception_table)
        .long do_divide_error
        .long do_debug
        .long do_nmi
        .long do_int3
        .long do_overflow
        .long do_bounds
        .long do_invalid_op
        .long do_device_not_available
        .long 0 # double fault
        .long do_coprocessor_segment_overrun
        .long do_invalid_TSS
        .long do_segment_not_present
        .long do_stack_segment
        .long do_general_protection
        .long do_page_fault
        .long do_spurious_interrupt_bug
        .long do_coprocessor_error
        .long do_alignment_check
        .long do_machine_check
        .long do_simd_coprocessor_error

ENTRY(hypercall_table)
        .long do_set_trap_table     /*  0 */
        .long do_mmu_update
        .long do_set_gdt
        .long do_stack_switch
        .long do_set_callbacks
        .long do_fpu_taskswitch     /*  5 */
        .long do_sched_op_compat
        .long do_platform_op
        .long do_set_debugreg
        .long do_get_debugreg
        .long do_update_descriptor  /* 10 */
        .long do_ni_hypercall
        .long do_memory_op
        .long do_multicall
        .long do_update_va_mapping
        .long do_set_timer_op       /* 15 */
        .long do_event_channel_op_compat
        .long do_xen_version
        .long do_console_io
        .long do_physdev_op_compat
        .long do_grant_table_op     /* 20 */
        .long do_vm_assist
        .long do_update_va_mapping_otherdomain
        .long do_iret
        .long do_vcpu_op
        .long do_ni_hypercall       /* 25 */
        .long do_mmuext_op
        .long do_xsm_op
        .long do_nmi_op
        .long do_sched_op
        .long do_callback_op        /* 30 */
        .long do_xenoprof_op
        .long do_event_channel_op
        .long do_physdev_op
        .long do_hvm_op
        .long do_sysctl             /* 35 */
        .long do_domctl
        .long do_kexec_op
        .rept __HYPERVISOR_arch_0-((.-hypercall_table)/4)
        .long do_ni_hypercall
        .endr
        .long do_mca                /* 48 */
        .rept NR_hypercalls-((.-hypercall_table)/4)
        .long do_ni_hypercall
        .endr
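/*
 * The .rept blocks pad the table with do_ni_hypercall up to the start of
 * the architecture-specific range (__HYPERVISOR_arch_0 == 48, where do_mca
 * lives) and then up to NR_hypercalls, so that any unimplemented hypercall
 * index still lands on a safe -ENOSYS handler.
 */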
ENTRY(hypercall_args_table)
        .byte 1 /* do_set_trap_table */    /*  0 */
        .byte 4 /* do_mmu_update */
        .byte 2 /* do_set_gdt */
        .byte 2 /* do_stack_switch */
        .byte 4 /* do_set_callbacks */
        .byte 1 /* do_fpu_taskswitch */    /*  5 */
        .byte 2 /* do_sched_op_compat */
        .byte 1 /* do_platform_op */
        .byte 2 /* do_set_debugreg */
        .byte 1 /* do_get_debugreg */
        .byte 4 /* do_update_descriptor */ /* 10 */
        .byte 0 /* do_ni_hypercall */
        .byte 2 /* do_memory_op */
        .byte 2 /* do_multicall */
        .byte 4 /* do_update_va_mapping */
        .byte 2 /* do_set_timer_op */      /* 15 */
        .byte 1 /* do_event_channel_op_compat */
        .byte 2 /* do_xen_version */
        .byte 3 /* do_console_io */
        .byte 1 /* do_physdev_op_compat */
        .byte 3 /* do_grant_table_op */    /* 20 */
        .byte 2 /* do_vm_assist */
        .byte 5 /* do_update_va_mapping_otherdomain */
        .byte 0 /* do_iret */
        .byte 3 /* do_vcpu_op */
        .byte 0 /* do_ni_hypercall */      /* 25 */
        .byte 4 /* do_mmuext_op */
        .byte 1 /* do_xsm_op */
        .byte 2 /* do_nmi_op */
        .byte 2 /* do_sched_op */
        .byte 2 /* do_callback_op */       /* 30 */
        .byte 2 /* do_xenoprof_op */
        .byte 2 /* do_event_channel_op */
        .byte 2 /* do_physdev_op */
        .byte 2 /* do_hvm_op */
        .byte 1 /* do_sysctl */            /* 35 */
        .byte 1 /* do_domctl */
        .byte 2 /* do_kexec_op */
        .rept __HYPERVISOR_arch_0-(.-hypercall_args_table)
        .byte 0 /* do_ni_hypercall */
        .endr
        .byte 1 /* do_mca */               /* 48 */
        .rept NR_hypercalls-(.-hypercall_args_table)
        .byte 0 /* do_ni_hypercall */
        .endr
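/*
 * Each entry above gives the number of arguments a hypercall consumes, as
 * used by the shadow-parameter logic in the hypercall path; the padding
 * keeps this table index-aligned with hypercall_table.
 */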