/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
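
/* pr_fmt() below prefixes every pr_*() message from this file with the module name. */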
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <asm/hyperv.h>
#include "hyperv_vmbus.h"

/* The one and only */
struct hv_context hv_context = {
	.synic_initialized	= false,
	.hypercall_page		= NULL,
	.signal_event_param	= NULL,
	.signal_event_buffer	= NULL,
};

/*
 * query_hypervisor_presence
 * - Query CPUID for the presence of the Hyper-V hypervisor.
 */
static int query_hypervisor_presence(void)

	op = HVCPUID_VERSION_FEATURES;
	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ecx & HV_PRESENT_BIT;

/*
 * query_hypervisor_info - Get version info of the Windows hypervisor.
 */
static int query_hypervisor_info(void)

	unsigned int max_leaf;

	/*
	 * It is assumed that this is called after confirming that Viridian
	 * is present.  Query id and revision.
	 */
	op = HVCPUID_VENDOR_MAXFUNCTION;
	cpuid(op, &eax, &ebx, &ecx, &edx);

	if (max_leaf >= HVCPUID_VERSION) {
		cpuid(op, &eax, &ebx, &ecx, &edx);
		pr_info("Hyper-V Host OS Build:%d-%d.%d-%d-%d.%d\n",
			eax, ebx >> 16, ebx & 0xFFFF, ecx,
			edx >> 24, edx & 0xFFFFFF);
	}

/*
 * do_hypercall - Invoke the specified hypercall.
 */
static u64 do_hypercall(u64 control, void *input, void *output)

	u64 input_address = (input) ? virt_to_phys(input) : 0;
	u64 output_address = (output) ? virt_to_phys(output) : 0;
	void *hypercall_page = hv_context.hypercall_page;
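
	/*
	 * x86_64 hypercall convention, as used by the inline assembly
	 * below: RCX carries the control code, RDX the input parameter
	 * GPA and R8 the output parameter GPA; the status is returned
	 * in RAX.
	 */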
	__asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8");
	__asm__ __volatile__("call *%3" : "=a" (hv_status) :
			     "c" (control), "d" (input_address),
			     "m" (hypercall_page));

	u32 control_hi = control >> 32;
	u32 control_lo = control & 0xFFFFFFFF;
	u32 hv_status_hi = 1;
	u32 hv_status_lo = 1;
	u64 input_address = (input) ? virt_to_phys(input) : 0;
	u32 input_address_hi = input_address >> 32;
	u32 input_address_lo = input_address & 0xFFFFFFFF;
	u64 output_address = (output) ? virt_to_phys(output) : 0;
	u32 output_address_hi = output_address >> 32;
	u32 output_address_lo = output_address & 0xFFFFFFFF;
	void *hypercall_page = hv_context.hypercall_page;
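
	/*
	 * 32-bit hypercall convention: the 64-bit control code goes in
	 * EDX:EAX, the input GPA in EBX:ECX and the output GPA in EDI:ESI;
	 * the 64-bit status comes back in EDX:EAX and is reassembled below.
	 */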
	__asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
			      "=a"(hv_status_lo) : "d" (control_hi),
			      "a" (control_lo), "b" (input_address_hi),
			      "c" (input_address_lo), "D"(output_address_hi),
			      "S"(output_address_lo), "m" (hypercall_page));

	return hv_status_lo | ((u64)hv_status_hi << 32);

/*
 * hv_init - Main initialization routine.
 *
 * This routine must be called before any other routines in this file are used.
 */

	union hv_x64_msr_hypercall_contents hypercall_msr;
	void *virtaddr = NULL;

	memset(hv_context.synic_event_page, 0, sizeof(void *) * MAX_NUM_CPUS);
	memset(hv_context.synic_message_page, 0,
	       sizeof(void *) * MAX_NUM_CPUS);

	if (!query_hypervisor_presence())
		goto cleanup;

	max_leaf = query_hypervisor_info();

	rdmsrl(HV_X64_MSR_GUEST_OS_ID, hv_context.guestid);

	if (hv_context.guestid != 0)
		goto cleanup;

	/* Write our OS info */
	wrmsrl(HV_X64_MSR_GUEST_OS_ID, HV_LINUX_GUEST_ID);
	hv_context.guestid = HV_LINUX_GUEST_ID;
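
	/*
	 * The hypervisor requires that HV_X64_MSR_GUEST_OS_ID hold a
	 * non-zero guest identity before the hypercall page can be
	 * enabled, which is why the OS ID is registered first.
	 */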

	/* See if the hypercall page is already set */
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
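
	/*
	 * The page backing the hypercall interface must be executable and
	 * is handed to the hypervisor by page frame number: its PFN plus
	 * the enable bit are programmed into HV_X64_MSR_HYPERCALL below.
	 */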
	hypercall_msr.enable = 1;
	hypercall_msr.guest_physical_address = vmalloc_to_pfn(virtaddr);
	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	/* Confirm that the hypercall page did get set up. */
	hypercall_msr.as_uint64 = 0;
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	if (!hypercall_msr.enable)
		goto cleanup;

	hv_context.hypercall_page = virtaddr;

	/* Set up the global signal event param for the signal event hypercall */
	hv_context.signal_event_buffer =
		kmalloc(sizeof(struct hv_input_signal_event_buffer),
			GFP_KERNEL);
	if (!hv_context.signal_event_buffer)
		goto cleanup;

	hv_context.signal_event_param =
		(struct hv_input_signal_event *)
		(ALIGN((unsigned long)
		       hv_context.signal_event_buffer,
		       HV_HYPERCALL_PARAM_ALIGN));
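
	/*
	 * signal_event_param points into signal_event_buffer, rounded up to
	 * the alignment the hypercall ABI expects for its input block, so
	 * it can be handed directly to HVCALL_SIGNAL_EVENT.
	 */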

	hv_context.signal_event_param->connectionid.asu32 = 0;
	hv_context.signal_event_param->connectionid.u.id =
		VMBUS_EVENT_CONNECTION_ID;
	hv_context.signal_event_param->flag_number = 0;
	hv_context.signal_event_param->rsvdz = 0;

cleanup:
	if (hypercall_msr.enable) {
		hypercall_msr.as_uint64 = 0;
		wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
	}

/*
 * hv_cleanup - Cleanup routine.
 *
 * This routine is normally called during driver unload or module exit.
 */
void hv_cleanup(void)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;

	kfree(hv_context.signal_event_buffer);
	hv_context.signal_event_buffer = NULL;
	hv_context.signal_event_param = NULL;

	if (hv_context.hypercall_page) {
		hypercall_msr.as_uint64 = 0;
		wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
		vfree(hv_context.hypercall_page);
		hv_context.hypercall_page = NULL;
	}
}

/*
 * hv_post_message - Post a message using the hypervisor message IPC.
 *
 * This involves a hypercall.
 */
u16 hv_post_message(union hv_connection_id connection_id,
		    enum hv_message_type message_type,
		    void *payload, size_t payload_size)

	struct aligned_input {
		u64 alignment8;
		struct hv_input_post_message msg;
	};
	struct hv_input_post_message *aligned_msg;

	if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
		return -EMSGSIZE;

	addr = (unsigned long)kmalloc(sizeof(struct aligned_input), GFP_ATOMIC);
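
	/*
	 * The buffer is over-allocated so that the hv_input_post_message
	 * handed to the hypercall can be placed on an
	 * HV_HYPERCALL_PARAM_ALIGN-aligned boundary inside it.
	 */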
	aligned_msg = (struct hv_input_post_message *)
			(ALIGN(addr, HV_HYPERCALL_PARAM_ALIGN));

	aligned_msg->connectionid = connection_id;
	aligned_msg->message_type = message_type;
	aligned_msg->payload_size = payload_size;
	memcpy((void *)aligned_msg->payload, payload, payload_size);

	status = do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL) & 0xFFFF;

/*
 * hv_signal_event - Signal an event on the specified connection using the
 * hypervisor event IPC.
 *
 * This involves a hypercall.
 */
u16 hv_signal_event(void)

	status = do_hypercall(HVCALL_SIGNAL_EVENT,
			      hv_context.signal_event_param,
			      NULL) & 0xFFFF;

/*
 * hv_synic_init - Initialize the Synthetic Interrupt Controller.
 *
 * If it is already initialized by another entity (i.e., an x2v shim), we
 * need to retrieve the initialized message and event pages.  Otherwise,
 * we create and initialize the message and event pages.
 */
void hv_synic_init(void *irqarg)

	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_sint shared_sint;
	union hv_synic_scontrol sctrl;

	u32 irq_vector = *((u32 *)(irqarg));
	int cpu = smp_processor_id();
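
	/*
	 * This routine is expected to run on each CPU in turn (e.g. via
	 * on_each_cpu()): the SIMP, SIEFP, SINT and SCONTROL MSRs
	 * programmed below are per-processor SynIC registers.
	 */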

	if (!hv_context.hypercall_page)
		return;

	/* Check the version */
	rdmsrl(HV_X64_MSR_SVERSION, version);

	hv_context.synic_message_page[cpu] =
		(void *)get_zeroed_page(GFP_ATOMIC);

	if (hv_context.synic_message_page[cpu] == NULL) {
		pr_err("Unable to allocate SYNIC message page\n");
		goto cleanup;
	}

	hv_context.synic_event_page[cpu] =
		(void *)get_zeroed_page(GFP_ATOMIC);

	if (hv_context.synic_event_page[cpu] == NULL) {
		pr_err("Unable to allocate SYNIC event page\n");
		goto cleanup;
	}

	/* Set up the SynIC message page */
	rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
	simp.simp_enabled = 1;
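	/*
	 * The SIMP and SIEFP MSRs take the base of the message/event page
	 * as a guest page frame number, hence the PAGE_SHIFT conversion of
	 * the physical addresses below.
	 */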
	simp.base_simp_gpa = virt_to_phys(hv_context.synic_message_page[cpu])
		>> PAGE_SHIFT;

	wrmsrl(HV_X64_MSR_SIMP, simp.as_uint64);

	/* Set up the SynIC event page */
	rdmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
	siefp.siefp_enabled = 1;
	siefp.base_siefp_gpa = virt_to_phys(hv_context.synic_event_page[cpu])
		>> PAGE_SHIFT;

	wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);

	/* Set up the shared SINT. */
	rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	shared_sint.as_uint64 = 0;
	shared_sint.vector = irq_vector; /* HV_SHARED_SINT_IDT_VECTOR + 0x20; */
	shared_sint.masked = false;
	shared_sint.auto_eoi = false;

	wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	/* Enable the global synic bit */
	rdmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
	sctrl.enable = 1;
	wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);

	hv_context.synic_initialized = true;
	return;

cleanup:
	if (hv_context.synic_event_page[cpu])
		free_page((unsigned long)hv_context.synic_event_page[cpu]);

	if (hv_context.synic_message_page[cpu])
		free_page((unsigned long)hv_context.synic_message_page[cpu]);

/*
 * hv_synic_cleanup - Cleanup routine for hv_synic_init().
 */
void hv_synic_cleanup(void *arg)

	union hv_synic_sint shared_sint;
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	int cpu = smp_processor_id();

	if (!hv_context.synic_initialized)
		return;

	rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	shared_sint.masked = 1;

	/* Need to clean up correctly in the SMP case!!! */
	/* Disable the interrupt */
	wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
	simp.simp_enabled = 0;
	simp.base_simp_gpa = 0;

	wrmsrl(HV_X64_MSR_SIMP, simp.as_uint64);

	rdmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
	siefp.siefp_enabled = 0;
	siefp.base_siefp_gpa = 0;

	wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);

	free_page((unsigned long)hv_context.synic_message_page[cpu]);
	free_page((unsigned long)hv_context.synic_event_page[cpu]);