/*
 * Copyright (c) 2001-2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief Virtual Address Translation subsystem.
 *
 * This file contains code for creating, destroying and searching
 * mappings between virtual addresses and physical addresses.
 * Functions here are mere wrappers that call the real implementation.
 * They however, define the single interface.
 *
 * Note on memory prefetching and updating memory mappings, also described in:
 * AMD x86-64 Architecture Programmer's Manual, Volume 2, System Programming,
 * 7.2.1 Special Coherency Considerations.
 *
 * The processor which modifies a page table mapping can access prefetched data
 * from the old mapping. In order to prevent this, we place a memory barrier
 * after a mapping is updated.
 *
 * We assume that the other processors are either not using the mapping yet
 * (i.e. during the bootstrap) or are executing the TLB shootdown code. While
 * we don't care much about the former case, the processors in the latter case
 * will do an implicit serialization by virtue of running the TLB shootdown
 * code itself.
 */
#include <arch/mm/page.h>
#include <arch/mm/asid.h>
#include <arch/barrier.h>
#include <arch/types.h>
/** Virtual operations for page subsystem. */
72
page_mapping_operations_t *page_mapping_operations = NULL;
79
/** Map memory structure
81
* Identity-map memory structure
82
* considering possible crossings
85
* @param s Address of the structure.
86
* @param size Size of the structure.
88
void map_structure(uintptr_t s, size_t size)
92
length = size + (s - (s & ~(PAGE_SIZE - 1)));
93
cnt = length / PAGE_SIZE + (length % PAGE_SIZE > 0);
95
for (i = 0; i < cnt; i++)
96
page_mapping_insert(AS_KERNEL, s + i * PAGE_SIZE,
97
s + i * PAGE_SIZE, PAGE_NOT_CACHEABLE | PAGE_WRITE);
99
/* Repel prefetched accesses to the old mapping. */
103
/** Insert mapping of page to frame.
105
* Map virtual address page to physical address frame
106
* using flags. Allocate and setup any missing page tables.
108
* The page table must be locked and interrupts must be disabled.
110
* @param as Address space to wich page belongs.
111
* @param page Virtual address of the page to be mapped.
112
* @param frame Physical address of memory frame to which the mapping is
114
* @param flags Flags to be used for mapping.
116
void page_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags)
118
ASSERT(page_mapping_operations);
119
ASSERT(page_mapping_operations->mapping_insert);
121
page_mapping_operations->mapping_insert(as, page, frame, flags);
123
/* Repel prefetched accesses to the old mapping. */
127
/** Remove mapping of page.
129
* Remove any mapping of page within address space as.
130
* TLB shootdown should follow in order to make effects of
133
* The page table must be locked and interrupts must be disabled.
135
* @param as Address space to wich page belongs.
136
* @param page Virtual address of the page to be demapped.
138
void page_mapping_remove(as_t *as, uintptr_t page)
140
ASSERT(page_mapping_operations);
141
ASSERT(page_mapping_operations->mapping_remove);
143
page_mapping_operations->mapping_remove(as, page);
145
/* Repel prefetched accesses to the old mapping. */
149
/** Find mapping for virtual page
151
* Find mapping for virtual page.
153
* The page table must be locked and interrupts must be disabled.
155
* @param as Address space to wich page belongs.
156
* @param page Virtual page.
158
* @return NULL if there is no such mapping; requested mapping
161
pte_t *page_mapping_find(as_t *as, uintptr_t page)
163
ASSERT(page_mapping_operations);
164
ASSERT(page_mapping_operations->mapping_find);
166
return page_mapping_operations->mapping_find(as, page);