/*
 * mtrr.c: MTRR/PAT virtualization
 *
 * Copyright (c) 2007, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */
#include <public/hvm/e820.h>
#include <xen/types.h>
#include <asm/e820.h>
#include <asm/mm.h>
#include <asm/paging.h>
#include <asm/p2m.h>
#include <xen/domain_page.h>
#include <asm/mtrr.h>
#include <asm/hvm/support.h>
#include <asm/hvm/cacheattr.h>

extern struct mtrr_state mtrr_state;

static uint64_t phys_base_msr_mask;
static uint64_t phys_mask_msr_mask;
static uint32_t size_or_mask;
static uint32_t size_and_mask;

/* Get page attribute fields (PAn) from PAT MSR. */
#define pat_cr_2_paf(pat_cr,n)  ((((uint64_t)pat_cr) >> ((n)<<3)) & 0xff)
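
/*
 * Worked example (illustrative): for the power-on default PAT value
 * 0x0007040600070406, pat_cr_2_paf(pat, 1) extracts bits 8-15 and yields
 * 0x04 (write-through), while pat_cr_2_paf(pat, 3) yields 0x00
 * (uncacheable).
 */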

/* PAT entry to PTE flags (PAT, PCD, PWT bits). */
static uint8_t pat_entry_2_pte_flags[8] = {
    0,           _PAGE_PWT,
    _PAGE_PCD,   _PAGE_PCD | _PAGE_PWT,
    _PAGE_PAT,   _PAGE_PAT | _PAGE_PWT,
    _PAGE_PAT | _PAGE_PCD, _PAGE_PAT | _PAGE_PCD | _PAGE_PWT };
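
/*
 * E.g. PAT entry 3 maps to _PAGE_PCD | _PAGE_PWT: for a 4K PTE the entry
 * index is encoded as PAT (bit 7), PCD (bit 4) and PWT (bit 3) of the
 * PTE flags, i.e. entry bit 2 -> PAT, bit 1 -> PCD, bit 0 -> PWT.
 */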

/* Effective mm type lookup table, according to MTRR and PAT. */
static uint8_t mm_type_tbl[MTRR_NUM_TYPES][PAT_TYPE_NUMS] = {
/********PAT(UC,WC,RS,RS,WT,WP,WB,UC-)*/
/* RS means reserved type(2,3), and the type is hardcoded here. */
 /*MTRR(UC):(UC,WC,RS,RS,UC,UC,UC,UC)*/
            {0, 1, 2, 2, 0, 0, 0, 0},
 /*MTRR(WC):(UC,WC,RS,RS,UC,UC,WC,WC)*/
            {0, 1, 2, 2, 0, 0, 1, 1},
 /*MTRR(RS):(RS,RS,RS,RS,RS,RS,RS,RS)*/
            {2, 2, 2, 2, 2, 2, 2, 2},
 /*MTRR(RS):(RS,RS,RS,RS,RS,RS,RS,RS)*/
            {2, 2, 2, 2, 2, 2, 2, 2},
 /*MTRR(WT):(UC,WC,RS,RS,WT,WP,WT,UC)*/
            {0, 1, 2, 2, 4, 5, 4, 0},
 /*MTRR(WP):(UC,WC,RS,RS,WT,WP,WP,WC)*/
            {0, 1, 2, 2, 4, 5, 5, 1},
 /*MTRR(WB):(UC,WC,RS,RS,WT,WP,WB,UC)*/
            {0, 1, 2, 2, 4, 5, 6, 0}
};
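
/*
 * Worked example (illustrative): with MTRR type WB (6) and PAT type WT
 * (4), mm_type_tbl[6][4] == 4, i.e. the effective memory type is WT;
 * the PAT can only make a WB range less cacheable, never more.
 */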

/*
 * Reverse lookup table, to find a pat type according to MTRR and effective
 * memory type. This table is dynamically generated.
 */
static uint8_t mtrr_epat_tbl[MTRR_NUM_TYPES][MEMORY_NUM_TYPES];

/* Lookup table for PAT entry of a given PAT value in host PAT. */
static uint8_t pat_entry_tbl[PAT_TYPE_NUMS];

static void get_mtrr_range(uint64_t base_msr, uint64_t mask_msr,
                           uint64_t *base, uint64_t *end)
{
    uint32_t mask_lo = (uint32_t)mask_msr;
    uint32_t mask_hi = (uint32_t)(mask_msr >> 32);
    uint32_t base_lo = (uint32_t)base_msr;
    uint32_t base_hi = (uint32_t)(base_msr >> 32);
    uint32_t size;

    if ( (mask_lo & 0x800) == 0 )
    {
        /* Invalid (i.e. free) range */
        *base = 0;
        *end = 0;
        return;
    }

    /* Work out the shifted address mask. */
    mask_lo = (size_or_mask | (mask_hi << (32 - PAGE_SHIFT)) |
               (mask_lo >> PAGE_SHIFT));

    /* This works correctly if size is a power of two (a contiguous range). */
    size = -mask_lo;
    *base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
    *end = *base + size - 1;
}
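
/*
 * Worked example (illustrative): a 256MiB range at 1GiB, with a 36-bit
 * physical address width, has mask MSR low/high words 0xf0000800/0xf.
 * After shifting, mask_lo == 0xffff0000, so size == -mask_lo == 0x10000
 * pages (256MiB) and the range spans page frames 0x40000-0x4ffff.
 */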

bool_t is_var_mtrr_overlapped(struct mtrr_state *m)
{
    int32_t seg, i;
    uint64_t phys_base, phys_mask, phys_base_pre, phys_mask_pre;
    uint64_t base_pre, end_pre, base, end;
    uint8_t num_var_ranges = (uint8_t)m->mtrr_cap;

    for ( i = 0; i < num_var_ranges; i++ )
    {
        phys_base_pre = ((uint64_t*)m->var_ranges)[i*2];
        phys_mask_pre = ((uint64_t*)m->var_ranges)[i*2 + 1];

        get_mtrr_range(phys_base_pre, phys_mask_pre,
                       &base_pre, &end_pre);

        for ( seg = i + 1; seg < num_var_ranges; seg++ )
        {
            phys_base = ((uint64_t*)m->var_ranges)[seg*2];
            phys_mask = ((uint64_t*)m->var_ranges)[seg*2 + 1];

            get_mtrr_range(phys_base, phys_mask,
                           &base, &end);

            if ( ((base_pre != end_pre) && (base != end))
                 || ((base >= base_pre) && (base <= end_pre))
                 || ((end >= base_pre) && (end <= end_pre))
                 || ((base_pre >= base) && (base_pre <= end))
                 || ((end_pre >= base) && (end_pre <= end)) )
            {
                /* MTRR is overlapped. */
                return 1;
            }
        }
    }

    return 0;
}

#define MTRR_PHYSMASK_VALID_BIT  11
#define MTRR_PHYSMASK_SHIFT      12

#define MTRR_PHYSBASE_TYPE_MASK  0xff   /* lowest 8 bits */
#define MTRR_PHYSBASE_SHIFT      12
#define MTRR_VCNT                8

#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)

static int hvm_mtrr_pat_init(void)
{
    unsigned int i, j, phys_addr;

    memset(&mtrr_epat_tbl, INVALID_MEM_TYPE, sizeof(mtrr_epat_tbl));
    for ( i = 0; i < MTRR_NUM_TYPES; i++ )
    {
        for ( j = 0; j < PAT_TYPE_NUMS; j++ )
        {
            int32_t tmp = mm_type_tbl[i][j];
            if ( (tmp >= 0) && (tmp < MEMORY_NUM_TYPES) )
                mtrr_epat_tbl[i][tmp] = j;
        }
    }

    memset(&pat_entry_tbl, INVALID_MEM_TYPE,
           PAT_TYPE_NUMS * sizeof(pat_entry_tbl[0]));
    for ( i = 0; i < PAT_TYPE_NUMS; i++ )
    {
        for ( j = 0; j < PAT_TYPE_NUMS; j++ )
        {
            if ( pat_cr_2_paf(host_pat, j) == i )
            {
                pat_entry_tbl[i] = j;
                break;
            }
        }
    }

    phys_addr = 36;
    if ( cpuid_eax(0x80000000) >= 0x80000008 )
        phys_addr = (uint8_t)cpuid_eax(0x80000008);

    phys_base_msr_mask = ~((((uint64_t)1) << phys_addr) - 1) | 0xf00UL;
    phys_mask_msr_mask = ~((((uint64_t)1) << phys_addr) - 1) | 0x7ffUL;

    size_or_mask = ~((1 << (phys_addr - PAGE_SHIFT)) - 1);
    size_and_mask = ~size_or_mask & 0xfff00000;

    return 0;
}
__initcall(hvm_mtrr_pat_init);
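
/*
 * Illustrative values: with a 36-bit physical address width the reserved
 * bit masks come out as phys_base_msr_mask == 0xfffffff000000f00 (bits
 * above the address width, plus reserved bits 8-11 of PHYSBASE) and
 * phys_mask_msr_mask == 0xfffffff0000007ff (bits above the address
 * width, plus reserved bits 0-10 of PHYSMASK).
 */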

uint8_t pat_type_2_pte_flags(uint8_t pat_type)
{
    int32_t pat_entry = pat_entry_tbl[pat_type];

    /*
     * INVALID_MEM_TYPE means no entry with this pat_type was found in
     * the host PAT. That cannot happen if the host PAT covers all the
     * PAT types; fall back to UC in case it does not.
     */
    if ( likely(pat_entry != INVALID_MEM_TYPE) )
        return pat_entry_2_pte_flags[pat_entry];

    return pat_entry_2_pte_flags[pat_entry_tbl[PAT_TYPE_UNCACHABLE]];
}

int hvm_vcpu_cacheattr_init(struct vcpu *v)
{
    struct mtrr_state *m = &v->arch.hvm_vcpu.mtrr;

    memset(m, 0, sizeof(*m));

    m->var_ranges = xmalloc_array(struct mtrr_var_range, MTRR_VCNT);
    if ( m->var_ranges == NULL )
        return -ENOMEM;
    memset(m->var_ranges, 0, MTRR_VCNT * sizeof(struct mtrr_var_range));

    m->mtrr_cap = (1u << 10) | (1u << 8) | MTRR_VCNT;

    v->arch.hvm_vcpu.pat_cr =
        ((uint64_t)PAT_TYPE_WRBACK) |               /* PAT0: WB */
        ((uint64_t)PAT_TYPE_WRTHROUGH << 8) |       /* PAT1: WT */
        ((uint64_t)PAT_TYPE_UC_MINUS << 16) |       /* PAT2: UC- */
        ((uint64_t)PAT_TYPE_UNCACHABLE << 24) |     /* PAT3: UC */
        ((uint64_t)PAT_TYPE_WRBACK << 32) |         /* PAT4: WB */
        ((uint64_t)PAT_TYPE_WRTHROUGH << 40) |      /* PAT5: WT */
        ((uint64_t)PAT_TYPE_UC_MINUS << 48) |       /* PAT6: UC- */
        ((uint64_t)PAT_TYPE_UNCACHABLE << 56);      /* PAT7: UC */

    return 0;
}
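
/*
 * Note: the default guest PAT above encodes as 0x0007040600070406, the
 * x86 power-on default, so fresh guests see the PAT layout they expect.
 */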

void hvm_vcpu_cacheattr_destroy(struct vcpu *v)
{
    xfree(v->arch.hvm_vcpu.mtrr.var_ranges);
}

/*
 * Get MTRR memory type for physical address pa.
 */
static uint8_t get_mtrr_type(struct mtrr_state *m, paddr_t pa)
{
    int32_t  addr, seg, index;
    uint8_t  overlap_mtrr = 0;
    uint8_t  overlap_mtrr_pos = 0;
    uint64_t phys_base;
    uint64_t phys_mask;
    uint8_t  num_var_ranges = m->mtrr_cap & 0xff;

    if ( unlikely(!(m->enabled & 0x2)) )
        return MTRR_TYPE_UNCACHABLE;

    if ( (pa < 0x100000) && (m->enabled & 1) )
    {
        /* Fixed range MTRR takes effect. */
        addr = (uint32_t)pa;
        if ( addr < 0x80000 )
        {
            seg = (addr >> 16);
            return m->fixed_ranges[seg];
        }
        else if ( addr < 0xc0000 )
        {
            seg = (addr - 0x80000) >> 14;
            index = (seg >> 3) + 1;
            seg &= 7;            /* select 0-7 segments */
            return m->fixed_ranges[index*8 + seg];
        }
        else
        {
            /* 0xC0000 --- 0x100000 */
            seg = (addr - 0xc0000) >> 12;
            index = (seg >> 3) + 3;
            seg &= 7;            /* select 0-7 segments */
            return m->fixed_ranges[index*8 + seg];
        }
    }
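
    /*
     * Worked example (illustrative): pa == 0xB8000 (the VGA text buffer)
     * falls in the 16KiB-granular region: seg = (0xB8000 - 0x80000) >> 14
     * == 14, index = (14 >> 3) + 1 == 2, then seg &= 7 == 6, so the type
     * comes from fixed_ranges[2*8 + 6], the 7th sub-range of
     * MTRRfix16K_A0000 (covering 0xB8000-0xBBFFF).
     */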

    /* Match with variable MTRRs. */
    for ( seg = 0; seg < num_var_ranges; seg++ )
    {
        phys_base = ((uint64_t*)m->var_ranges)[seg*2];
        phys_mask = ((uint64_t*)m->var_ranges)[seg*2 + 1];
        if ( phys_mask & (1 << MTRR_PHYSMASK_VALID_BIT) )
        {
            if ( ((uint64_t)pa & phys_mask) >> MTRR_PHYSMASK_SHIFT ==
                 (phys_base & phys_mask) >> MTRR_PHYSMASK_SHIFT )
            {
                if ( unlikely(m->overlapped) )
                {
                    overlap_mtrr |= 1 << (phys_base & MTRR_PHYSBASE_TYPE_MASK);
                    overlap_mtrr_pos = phys_base & MTRR_PHYSBASE_TYPE_MASK;
                }
                else
                {
                    /* If no overlap, return the found one. */
                    return (phys_base & MTRR_PHYSBASE_TYPE_MASK);
                }
            }
        }
    }

    /* Overlapped or not found. */
    if ( unlikely(overlap_mtrr == 0) )
        return m->def_type;

    if ( likely(!(overlap_mtrr & ~(((uint8_t)1) << overlap_mtrr_pos))) )
        /* Covers both the case of a single variable range match and of
         * two or more identical matches. */
        return overlap_mtrr_pos;

    if ( overlap_mtrr & 0x1 )
        /* Two or more match, one of them is UC. */
        return MTRR_TYPE_UNCACHABLE;

    if ( !(overlap_mtrr & 0xaf) )
        /* Two or more match, all of them WT or WB: WT wins. */
        return MTRR_TYPE_WRTHROUGH;

    /* Behaviour is undefined, but return the last overlapped type. */
    return overlap_mtrr_pos;
}

/*
 * Return the memory type from PAT.
 * NOTE: valid only when paging is enabled.
 *       Only 4K page PTE is supported now.
 */
static uint8_t page_pat_type(uint64_t pat_cr, uint32_t pte_flags)
{
    int32_t pat_entry;

    /* PCD/PWT -> bit 1/0 of PAT entry */
    pat_entry = (pte_flags >> 3) & 0x3;
    /* PAT bit of PTE -> bit 2 of PAT entry */
    if ( pte_flags & _PAGE_PAT )
        pat_entry |= 4;

    return (uint8_t)pat_cr_2_paf(pat_cr, pat_entry);
}
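
/*
 * Worked example (illustrative): pte_flags with _PAGE_PCD | _PAGE_PWT set
 * and _PAGE_PAT clear selects PAT entry 3, which is UC in the default
 * PAT layout.
 */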

/*
 * Effective memory type for leaf page.
 */
static uint8_t effective_mm_type(struct mtrr_state *m,
                                 uint64_t pat,
                                 paddr_t gpa,
                                 uint32_t pte_flags,
                                 uint8_t gmtrr_mtype)
{
    uint8_t mtrr_mtype, pat_value, effective;

    /* If get_pat_flags() gives a dedicated MTRR type, just use it. */
    if ( gmtrr_mtype == NO_HARDCODE_MEM_TYPE )
        mtrr_mtype = get_mtrr_type(m, gpa);
    else
        mtrr_mtype = gmtrr_mtype;

    pat_value = page_pat_type(pat, pte_flags);

    effective = mm_type_tbl[mtrr_mtype][pat_value];

    return effective;
}

uint32_t get_pat_flags(struct vcpu *v,
                       uint32_t gl1e_flags,
                       paddr_t gpaddr,
                       paddr_t spaddr,
                       uint8_t gmtrr_mtype)
{
    uint8_t guest_eff_mm_type;
    uint8_t shadow_mtrr_type;
    uint8_t pat_entry_value;
    uint64_t pat = v->arch.hvm_vcpu.pat_cr;
    struct mtrr_state *g = &v->arch.hvm_vcpu.mtrr;

    /* 1. Get the effective memory type of the guest physical address,
     * using the pair of guest MTRR and PAT.
     */
    guest_eff_mm_type = effective_mm_type(g, pat, gpaddr,
                                          gl1e_flags, gmtrr_mtype);
    /* 2. Get the memory type of the host physical address, using MTRR. */
    shadow_mtrr_type = get_mtrr_type(&mtrr_state, spaddr);

    /* 3. Find the memory type in PAT, given the host MTRR memory type
     * and the guest effective memory type.
     */
    pat_entry_value = mtrr_epat_tbl[shadow_mtrr_type][guest_eff_mm_type];
    /* If a conflict occurs (e.g. the host MTRR type is UC but the guest
     * memory type is WB), set UC as the effective memory type. Returning
     * PAT_TYPE_UNCACHABLE here always yields an effective type of UC.
     */
    if ( pat_entry_value == INVALID_MEM_TYPE )
    {
        struct domain *d = v->domain;
        p2m_type_t p2mt;
        gfn_to_mfn(d, paddr_to_pfn(gpaddr), &p2mt);
        if ( p2m_is_ram(p2mt) )
            gdprintk(XENLOG_WARNING,
                     "Conflict occurs for a given guest l1e flags:%x "
                     "at %"PRIx64" (the effective mm type:%d), "
                     "because the host mtrr type is:%d\n",
                     gl1e_flags, (uint64_t)gpaddr, guest_eff_mm_type,
                     shadow_mtrr_type);
        pat_entry_value = PAT_TYPE_UNCACHABLE;
    }
    /* 4. Get the pte flags. */
    return pat_type_2_pte_flags(pat_entry_value);
}
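
/*
 * Worked example (illustrative): guest MTRR WB and guest PAT WB give an
 * effective type of WB (6); with host MTRR WB, mtrr_epat_tbl[6][6] is
 * the PAT type WB, so the shadow PTE gets the flags of a WB host PAT
 * entry (entry 0 by default, i.e. no PAT/PCD/PWT bits set).
 */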

/* Helper functions for setting MTRR/PAT. */
bool_t pat_msr_set(uint64_t *pat, uint64_t msr_content)
{
    uint8_t *value = (uint8_t*)&msr_content;
    int32_t i;

    if ( *pat != msr_content )
    {
        /* Each PAT field must be 0 (UC), 1 (WC), 4 (WT), 5 (WP),
         * 6 (WB) or 7 (UC-). */
        for ( i = 0; i < 8; i++ )
            if ( unlikely(!(value[i] == 0 || value[i] == 1 ||
                            value[i] == 4 || value[i] == 5 ||
                            value[i] == 6 || value[i] == 7)) )
                return 0;

        *pat = msr_content;
    }

    return 1;
}

bool_t mtrr_def_type_msr_set(struct mtrr_state *m, uint64_t msr_content)
{
    uint8_t def_type = msr_content & 0xff;
    uint8_t enabled = (msr_content >> 10) & 0x3;

    if ( unlikely(!(def_type == 0 || def_type == 1 || def_type == 4 ||
                    def_type == 5 || def_type == 6)) )
    {
        HVM_DBG_LOG(DBG_LEVEL_MSR, "invalid MTRR def type:%x\n", def_type);
        return 0;
    }

    if ( unlikely(msr_content && (msr_content & ~0xcffUL)) )
    {
        HVM_DBG_LOG(DBG_LEVEL_MSR, "invalid msr content:%"PRIx64"\n",
                    msr_content);
        return 0;
    }

    m->enabled = enabled;
    m->def_type = def_type;

    return 1;
}

bool_t mtrr_fix_range_msr_set(struct mtrr_state *m, uint32_t row,
                              uint64_t msr_content)
{
    uint64_t *fixed_range_base = (uint64_t *)m->fixed_ranges;

    if ( fixed_range_base[row] != msr_content )
    {
        uint8_t *range = (uint8_t*)&msr_content;
        int32_t i, type;

        for ( i = 0; i < 8; i++ )
        {
            type = range[i];
            if ( unlikely(!(type == 0 || type == 1 ||
                            type == 4 || type == 5 || type == 6)) )
                return 0;
        }

        fixed_range_base[row] = msr_content;
    }

    return 1;
}

bool_t mtrr_var_range_msr_set(struct mtrr_state *m, uint32_t msr,
                              uint64_t msr_content)
{
    uint32_t index;
    uint64_t msr_mask;
    uint64_t *var_range_base = (uint64_t*)m->var_ranges;

    index = msr - MSR_IA32_MTRR_PHYSBASE0;

    if ( var_range_base[index] != msr_content )
    {
        uint32_t type = msr_content & 0xff;

        msr_mask = (index & 1) ? phys_mask_msr_mask : phys_base_msr_mask;

        if ( unlikely(!(type == 0 || type == 1 ||
                        type == 4 || type == 5 || type == 6)) )
            return 0;

        if ( unlikely(msr_content && (msr_content & msr_mask)) )
        {
            HVM_DBG_LOG(DBG_LEVEL_MSR, "invalid msr content:%"PRIx64"\n",
                        msr_content);
            return 0;
        }

        var_range_base[index] = msr_content;

        m->overlapped = is_var_mtrr_overlapped(m);
    }

    return 1;
}
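
/*
 * Illustrative: MSR 0x202 (IA32_MTRR_PHYSBASE1) gives index 2, an even
 * index, so it is validated against phys_base_msr_mask; MSR 0x203
 * (IA32_MTRR_PHYSMASK1) gives index 3 and uses phys_mask_msr_mask.
 */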

bool_t mtrr_pat_not_equal(struct vcpu *vd, struct vcpu *vs)
{
    struct mtrr_state *md = &vd->arch.hvm_vcpu.mtrr;
    struct mtrr_state *ms = &vs->arch.hvm_vcpu.mtrr;
    int32_t res;
    uint8_t num_var_ranges = (uint8_t)md->mtrr_cap;

    /* Test fixed ranges. */
    res = memcmp(md->fixed_ranges, ms->fixed_ranges,
                 NUM_FIXED_RANGES*sizeof(mtrr_type));
    if ( res )
        return 1;

    /* Test var ranges. */
    res = memcmp(md->var_ranges, ms->var_ranges,
                 num_var_ranges*sizeof(struct mtrr_var_range));
    if ( res )
        return 1;

    /* Test default type MSR: a difference in either field means the
     * states are not equal. */
    if ( (md->def_type != ms->def_type)
         || (md->enabled != ms->enabled) )
        return 1;

    /* Test PAT. */
    if ( vd->arch.hvm_vcpu.pat_cr != vs->arch.hvm_vcpu.pat_cr )
        return 1;

    return 0;
}

void hvm_init_cacheattr_region_list(
    struct domain *d)
{
    INIT_LIST_HEAD(&d->arch.hvm_domain.pinned_cacheattr_ranges);
}

void hvm_destroy_cacheattr_region_list(
    struct domain *d)
{
    struct list_head *head = &d->arch.hvm_domain.pinned_cacheattr_ranges;
    struct hvm_mem_pinned_cacheattr_range *range;

    while ( !list_empty(head) )
    {
        range = list_entry(head->next,
                           struct hvm_mem_pinned_cacheattr_range,
                           list);
        list_del(&range->list);
        xfree(range);
    }
}

int32_t hvm_get_mem_pinned_cacheattr(
    struct domain *d,
    uint64_t guest_fn,
    uint32_t *type)
{
    struct hvm_mem_pinned_cacheattr_range *range;

    *type = 0;

    if ( !is_hvm_domain(d) )
        return 0;

    list_for_each_entry_rcu ( range,
                              &d->arch.hvm_domain.pinned_cacheattr_ranges,
                              list )
    {
        if ( (guest_fn >= range->start) && (guest_fn <= range->end) )
        {
            *type = range->type;
            return 1;
        }
    }

    return 0;
}

int32_t hvm_set_mem_pinned_cacheattr(
    struct domain *d,
    uint64_t gfn_start,
    uint64_t gfn_end,
    uint32_t type)
{
    struct hvm_mem_pinned_cacheattr_range *range;

    if ( !((type == PAT_TYPE_UNCACHABLE) ||
           (type == PAT_TYPE_WRCOMB) ||
           (type == PAT_TYPE_WRTHROUGH) ||
           (type == PAT_TYPE_WRPROT) ||
           (type == PAT_TYPE_WRBACK) ||
           (type == PAT_TYPE_UC_MINUS)) ||
         !is_hvm_domain(d) )
        return -EINVAL;

    range = xmalloc(struct hvm_mem_pinned_cacheattr_range);
    if ( range == NULL )
        return -ENOMEM;

    memset(range, 0, sizeof(*range));

    range->start = gfn_start;
    range->end = gfn_end;
    range->type = type;

    list_add_rcu(&range->list, &d->arch.hvm_domain.pinned_cacheattr_ranges);

    return 0;
}

static int hvm_save_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
{
    int i;
    struct vcpu *v;
    struct hvm_hw_mtrr hw_mtrr;
    struct mtrr_state *mtrr_state;

    /* Save MTRR & PAT state for every vcpu. */
    for_each_vcpu(d, v)
    {
        mtrr_state = &v->arch.hvm_vcpu.mtrr;

        hw_mtrr.msr_pat_cr = v->arch.hvm_vcpu.pat_cr;

        hw_mtrr.msr_mtrr_def_type = mtrr_state->def_type
                                    | (mtrr_state->enabled << 10);
        hw_mtrr.msr_mtrr_cap = mtrr_state->mtrr_cap;

        for ( i = 0; i < MTRR_VCNT; i++ )
        {
            /* Save physbase. */
            hw_mtrr.msr_mtrr_var[i*2] =
                ((uint64_t*)mtrr_state->var_ranges)[i*2];
            /* Save physmask. */
            hw_mtrr.msr_mtrr_var[i*2+1] =
                ((uint64_t*)mtrr_state->var_ranges)[i*2+1];
        }

        for ( i = 0; i < NUM_FIXED_MSR; i++ )
            hw_mtrr.msr_mtrr_fixed[i] =
                ((uint64_t*)mtrr_state->fixed_ranges)[i];

        if ( hvm_save_entry(MTRR, v->vcpu_id, h, &hw_mtrr) != 0 )
            return 1;
    }

    return 0;
}

static int hvm_load_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
{
    int vcpuid, i;
    struct vcpu *v;
    struct mtrr_state *mtrr_state;
    struct hvm_hw_mtrr hw_mtrr;

    vcpuid = hvm_load_instance(h);
    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
    {
        gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
        return -EINVAL;
    }

    if ( hvm_load_entry(MTRR, h, &hw_mtrr) != 0 )
        return -EINVAL;

    mtrr_state = &v->arch.hvm_vcpu.mtrr;

    pat_msr_set(&v->arch.hvm_vcpu.pat_cr, hw_mtrr.msr_pat_cr);

    mtrr_state->mtrr_cap = hw_mtrr.msr_mtrr_cap;

    for ( i = 0; i < NUM_FIXED_MSR; i++ )
        mtrr_fix_range_msr_set(mtrr_state, i, hw_mtrr.msr_mtrr_fixed[i]);

    for ( i = 0; i < MTRR_VCNT; i++ )
    {
        mtrr_var_range_msr_set(mtrr_state,
                               MTRRphysBase_MSR(i),
                               hw_mtrr.msr_mtrr_var[i*2]);
        mtrr_var_range_msr_set(mtrr_state,
                               MTRRphysMask_MSR(i),
                               hw_mtrr.msr_mtrr_var[i*2+1]);
    }

    mtrr_def_type_msr_set(mtrr_state, hw_mtrr.msr_mtrr_def_type);

    return 0;
}

HVM_REGISTER_SAVE_RESTORE(MTRR, hvm_save_mtrr_msr, hvm_load_mtrr_msr,
                          1, HVMSR_PER_VCPU);

uint8_t epte_get_entry_emt(struct domain *d, unsigned long gfn, mfn_t mfn,
                           uint8_t *ipat, int direct_mmio)
{
    uint8_t gmtrr_mtype, hmtrr_mtype;
    uint32_t type;
    struct vcpu *v = current;

    *ipat = 0;

    if ( (current->domain != d) &&
         ((d->vcpu == NULL) || ((v = d->vcpu[0]) == NULL)) )
        return MTRR_TYPE_WRBACK;

    if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_IDENT_PT] )
        return MTRR_TYPE_WRBACK;

    if ( (v == current) && v->domain->arch.hvm_domain.is_in_uc_mode )
        return MTRR_TYPE_UNCACHABLE;

    if ( !mfn_valid(mfn_x(mfn)) )
        return MTRR_TYPE_UNCACHABLE;

    if ( hvm_get_mem_pinned_cacheattr(d, gfn, &type) )
        return type;

    if ( !iommu_enabled )
    {
        *ipat = 1;
        return MTRR_TYPE_WRBACK;
    }

    if ( direct_mmio )
        return MTRR_TYPE_UNCACHABLE;

    if ( iommu_snoop )
    {
        *ipat = 1;
        return MTRR_TYPE_WRBACK;
    }
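
    /*
     * Combine guest and host MTRR types by taking the numerically
     * smaller value; with the MTRR type encoding (UC=0, WC=1, WT=4,
     * WP=5, WB=6) this picks the more conservative of the two.
     */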
    gmtrr_mtype = get_mtrr_type(&v->arch.hvm_vcpu.mtrr, (gfn << PAGE_SHIFT));
    hmtrr_mtype = get_mtrr_type(&mtrr_state, (mfn_x(mfn) << PAGE_SHIFT));
    return ((gmtrr_mtype <= hmtrr_mtype) ? gmtrr_mtype : hmtrr_mtype);
}