/*
 * Copyright (c) 2006 Jakub Jermar
 * Copyright (c) 2006 Jakub Vana
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/** @addtogroup ia64mm
 * @{
 */
/** @file
 */
#include <arch/mm/page.h>
#include <arch/mm/asid.h>
#include <arch/mm/vhpt.h>
#include <arch/types.h>
#include <arch/barrier.h>
#include <genarch/mm/page_ht.h>
#include <debug.h>
static void set_environment(void);
54
/** Initialize ia64 virtual address translation subsystem. */
55
void page_arch_init(void)
57
page_mapping_operations = &ht_mapping_operations;
62
/** Initialize VHPT and region registers. */
63
void set_environment(void)
73
* First set up kernel region register.
74
* This is redundant (see start.S) but we keep it here just for sure.
76
rr.word = rr_read(VRN_KERNEL);
77
rr.map.ve = 0; /* disable VHPT walker */
78
rr.map.ps = PAGE_WIDTH;
79
rr.map.rid = ASID2RID(ASID_KERNEL, VRN_KERNEL);
80
rr_write(VRN_KERNEL, rr.word);
85
* And setup the rest of region register.
87
for(i = 0; i < REGION_REGISTERS; i++) {
93
rr.map.ve = 0; /* disable VHPT walker */
94
rr.map.rid = RID_KERNEL;
95
rr.map.ps = PAGE_WIDTH;
102
vhpt_base = vhpt_set_up();
105
* Set up PTA register.
107
pta.word = pta_read();
109
pta.map.ve = 0; /* disable VHPT walker */
110
pta.map.base = 0 >> PTA_BASE_SHIFT;
112
pta.map.ve = 1; /* enable VHPT walker */
113
pta.map.base = vhpt_base >> PTA_BASE_SHIFT;
115
pta.map.vf = 1; /* large entry format */
116
pta.map.size = VHPT_WIDTH;
122
/** Calculate address of collision chain from VPN and ASID.
124
* Interrupts must be disabled.
126
* @param page Address of virtual page including VRN bits.
127
* @param asid Address space identifier.
129
* @return VHPT entry address.
131
vhpt_entry_t *vhpt_hash(uintptr_t page, asid_t asid)
133
region_register rr_save, rr;
138
vrn = page >> VRN_SHIFT;
139
rid = ASID2RID(asid, vrn);
141
rr_save.word = rr_read(vrn);
142
if (rr_save.map.rid == rid) {
144
* The RID is already in place, compute thash and return.
146
v = (vhpt_entry_t *) thash(page);
151
* The RID must be written to some region register.
152
* To speed things up, register indexed by vrn is used.
154
rr.word = rr_save.word;
156
rr_write(vrn, rr.word);
158
v = (vhpt_entry_t *) thash(page);
159
rr_write(vrn, rr_save.word);
166
/** Compare ASID and VPN against PTE.
168
* Interrupts must be disabled.
170
* @param page Address of virtual page including VRN bits.
171
* @param asid Address space identifier.
173
* @return True if page and asid match the page and asid of t,
176
bool vhpt_compare(uintptr_t page, asid_t asid, vhpt_entry_t *v)
178
region_register rr_save, rr;
185
vrn = page >> VRN_SHIFT;
186
rid = ASID2RID(asid, vrn);
188
rr_save.word = rr_read(vrn);
189
if (rr_save.map.rid == rid) {
191
* The RID is already in place, compare ttag with t and return.
193
return ttag(page) == v->present.tag.tag_word;
197
* The RID must be written to some region register.
198
* To speed things up, register indexed by vrn is used.
200
rr.word = rr_save.word;
202
rr_write(vrn, rr.word);
204
match = (ttag(page) == v->present.tag.tag_word);
205
rr_write(vrn, rr_save.word);
212
/** Set up one VHPT entry.
214
* @param v VHPT entry to be set up.
215
* @param page Virtual address of the page mapped by the entry.
216
* @param asid Address space identifier of the address space to which
218
* @param frame Physical address of the frame to wich page is mapped.
219
* @param flags Different flags for the mapping.
222
vhpt_set_record(vhpt_entry_t *v, uintptr_t page, asid_t asid, uintptr_t frame,
225
region_register rr_save, rr;
232
vrn = page >> VRN_SHIFT;
233
rid = ASID2RID(asid, vrn);
238
rr_save.word = rr_read(vrn);
239
rr.word = rr_save.word;
241
rr_write(vrn, rr.word);
244
rr_write(vrn, rr_save.word);
257
v->present.ma = (flags & PAGE_CACHEABLE) ?
258
MA_WRITEBACK : MA_UNCACHEABLE;
259
v->present.a = false; /* not accessed */
260
v->present.d = false; /* not dirty */
261
v->present.pl = (flags & PAGE_USER) ? PL_USER : PL_KERNEL;
262
v->present.ar = (flags & PAGE_WRITE) ? AR_WRITE : AR_READ;
263
v->present.ar |= (flags & PAGE_EXEC) ? AR_EXECUTE : 0;
264
v->present.ppn = frame >> PPN_SHIFT;
265
v->present.ed = false; /* exception not deffered */
266
v->present.ps = PAGE_WIDTH;
268
v->present.tag.tag_word = tag;
271
uintptr_t hw_map(uintptr_t physaddr, size_t size __attribute__ ((unused)))
273
/* THIS is a dirty hack. */
274
return (uintptr_t)((uint64_t)(PA2KA(physaddr)) + VIO_OFFSET);