/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) Allen Kay <allen.m.kay@intel.com>
 * Copyright (C) Weidong Han <weidong.han@intel.com>
 */
#include <xen/sched.h>
#include <xen/domain_page.h>
#include <asm/paging.h>
#include <xen/iommu.h>

/*
 * iommu_inclusive_mapping: when set, all memory below 4GB is included in dom0
 * 1:1 iommu mappings except xen and unusable regions.
 */
static int iommu_inclusive_mapping = 1;
boolean_param("iommu_inclusive_mapping", iommu_inclusive_mapping);
/*
 * Map a VT-d table page, given by its machine address, into Xen's
 * virtual address space.  The mapping must be released with
 * unmap_vtd_domain_page().
 */
void *map_vtd_domain_page(u64 maddr)
{
    /* map_domain_page() takes a frame number, so shift out the 4K offset. */
    return map_domain_page(maddr >> PAGE_SHIFT_4K);
}
/* Release a mapping previously obtained from map_vtd_domain_page(). */
void unmap_vtd_domain_page(void *va)
{
    unmap_domain_page(va);
}
/*
 * Return the CLFLUSH cache line size in bytes: CPUID leaf 1 reports it
 * in EBX bits 15:8, in units of 8 bytes.
 */
unsigned int get_cache_line_size(void)
{
    return ((cpuid_ebx(1) >> 8) & 0xff) * 8;
}
/*
 * Flush the cache line containing @addr.
 * NOTE(review): the body was lost in extraction; clflush() matches the
 * upstream Xen implementation — confirm against the original file.
 */
void cacheline_flush(char * addr)
{
    clflush(addr);
}
/*
 * Write back and invalidate the entire cache hierarchy.
 * NOTE(review): the body was lost in extraction; wbinvd() matches the
 * upstream Xen implementation — confirm against the original file.
 */
void flush_all_cache()
{
    wbinvd();
}
/*
 * Map an IOMMU's register page (machine address @maddr) at a fixed,
 * uncacheable virtual address.  Each IOMMU unit gets its own fixmap slot,
 * indexed by @nr_iommus.
 */
void *map_to_nocache_virt(int nr_iommus, u64 maddr)
{
    set_fixmap_nocache(FIX_IOMMU_REGS_BASE_0 + nr_iommus, maddr);
    return (void *)fix_to_virt(FIX_IOMMU_REGS_BASE_0 + nr_iommus);
}
struct hvm_irq_dpci *domain_get_irq_dpci(struct domain *domain)
73
return domain->arch.hvm_domain.irq.dpci;
76
int domain_set_irq_dpci(struct domain *domain, struct hvm_irq_dpci *dpci)
78
if ( !domain || !dpci )
81
domain->arch.hvm_domain.irq.dpci = dpci;
85
void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
87
struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
88
struct hvm_irq_dpci *dpci = NULL;
89
struct dev_intx_gsi_link *digl, *tmp;
92
ASSERT(isairq < NR_ISAIRQS);
96
spin_lock(&d->event_lock);
98
dpci = domain_get_irq_dpci(d);
100
if ( !dpci || !test_bit(isairq, dpci->isairq_map) )
102
spin_unlock(&d->event_lock);
105
/* Multiple mirq may be mapped to one isa irq */
106
for ( i = find_first_bit(dpci->mapping, d->nr_pirqs);
108
i = find_next_bit(dpci->mapping, d->nr_pirqs, i + 1) )
110
list_for_each_entry_safe ( digl, tmp,
111
&dpci->mirq[i].digl_list, list )
113
if ( hvm_irq->pci_link.route[digl->link] == isairq )
115
hvm_pci_intx_deassert(d, digl->device, digl->intx);
116
if ( --dpci->mirq[i].pending == 0 )
118
stop_timer(&dpci->hvm_timer[domain_pirq_to_irq(d, i)]);
119
pirq_guest_eoi(d, i);
124
spin_unlock(&d->event_lock);
127
void iommu_set_dom0_mapping(struct domain *d)
129
u64 i, j, tmp, max_pfn;
130
extern int xen_in_range(unsigned long mfn);
132
BUG_ON(d->domain_id != 0);
134
max_pfn = max_t(u64, max_page, 0x100000000ull >> PAGE_SHIFT);
136
for ( i = 0; i < max_pfn; i++ )
139
* Set up 1:1 mapping for dom0. Default to use only conventional RAM
140
* areas and let RMRRs include needed reserved regions. When set, the
141
* inclusive mapping maps in everything below 4GB except unusable
144
if ( !page_is_ram_type(i, RAM_TYPE_CONVENTIONAL) &&
145
(!iommu_inclusive_mapping ||
146
page_is_ram_type(i, RAM_TYPE_UNUSABLE)) )
149
/* Exclude Xen bits */
150
if ( xen_in_range(i) )
153
tmp = 1 << (PAGE_SHIFT - PAGE_SHIFT_4K);
154
for ( j = 0; j < tmp; j++ )
155
iommu_map_page(d, (i*tmp+j), (i*tmp+j));