/*
 * This file contains the routines setting up the linux page tables.
 *
 * Copyright (C) 2008 Michal Simek
 * Copyright (C) 2008 PetaLogix
 * Copyright (C) 2007 Xilinx, Inc. All rights reserved.
 *
 * Derived from arch/ppc/mm/pgtable.c:
 * Derived from arch/ppc/mm/init.c:
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/init.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <linux/io.h>		/* virt_to_phys() */
#include <asm/mmu.h>
#include <asm/sections.h>
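
/*
 * There is no hash page table on MicroBlaze: the flush_HPTE() hook
 * inherited from the PowerPC code this file derives from therefore
 * reduces to invalidating the TLB entry for the virtual address.
 */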
#define flush_HPTE(X, va, pg)	_tlbie(va)
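
/*
 * Before mem_init() runs, the vmalloc allocator is not available, so
 * early ioremaps are handed out downward from ioremap_base;
 * ioremap_bot records the lowest address handed out so far (see
 * __ioremap() below).
 */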
unsigned long ioremap_base;
unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);

/* The maximum lowmem defaults to 768Mb, but this can be configured to
 * another value.
 */
#define MAX_LOW_MEM	CONFIG_LOWMEM_SIZE

struct pgtable_cache_struct quicklists;
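
/*
 * Low-level mapping helper; the public ioremap() below wraps it with
 * _PAGE_NO_CACHE so that device memory is mapped uncached.
 */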
static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
		unsigned long flags)
{
	unsigned long v, i;
	phys_addr_t p;
	int err;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from ioremap_base
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 *
	 * However, allow remap of rootfs: TBD
	 */
	if (mem_init_done &&
		p >= memory_start && p < virt_to_phys(high_memory) &&
		!(p >= virt_to_phys((unsigned long)&__bss_stop) &&
		p < virt_to_phys((unsigned long)&_end))) {
		printk(KERN_WARNING "__ioremap(): phys addr "PTE_FMT
			" is RAM lr %p\n", (unsigned long)p,
			__builtin_return_address(0));
		return NULL;
	}

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped? If the whole area is mapped then we're
	 * done, otherwise remap it since we want to keep the virt addrs for
	 * each request contiguous.
	 *
	 * We make the assumption here that if the bottom and top
	 * of the range we want are mapped then it's mapped to the
	 * same virt address (and this is contiguous).
	 */
	if (mem_init_done) {
		struct vm_struct *area;

		area = get_vm_area(size, VM_IOREMAP);
		if (area == NULL)
			return NULL;
		v = (unsigned long) area->addr;
	} else {
		v = (ioremap_bot -= size);
	}

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= _PAGE_KERNEL;
	if (flags & _PAGE_NO_CACHE)
		flags |= _PAGE_GUARDED;

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(v + i, p + i, flags);
	if (err) {
		if (mem_init_done)
			vfree((void *)v);
		return NULL;
	}

	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}

void __iomem *ioremap(phys_addr_t addr, unsigned long size)
{
	return __ioremap(addr, size, _PAGE_NO_CACHE);
}
EXPORT_SYMBOL(ioremap);
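
/*
 * Usage sketch (illustrative only, not part of the original file):
 * a driver maps its register window uncached and uses the normal io
 * accessors on the returned cookie.  The base, size and register
 * offset below are made up.
 */
#if 0
#define EXAMPLE_DEV_BASE	0x84000000	/* hypothetical device address */
#define EXAMPLE_DEV_SIZE	0x1000

static int example_probe(void)
{
	void __iomem *regs = ioremap(EXAMPLE_DEV_BASE, EXAMPLE_DEV_SIZE);

	if (!regs)
		return -ENOMEM;
	writel(1, regs + 0x4);		/* hypothetical control register */
	iounmap(regs);
	return 0;
}
#endif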

void iounmap(void *addr)
{
	if (addr > high_memory && (unsigned long) addr < ioremap_bot)
		vfree((void *) (PAGE_MASK & (unsigned long) addr));
}
EXPORT_SYMBOL(iounmap);
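
/*
 * With 4 kB pages, a 32-bit virtual address splits 10/10/12: the top
 * 10 bits index the page directory, the middle 10 bits index a page
 * table of 1024 PTEs, and the low 12 bits are the offset within the
 * page.  map_page() walks (and, for the second level, allocates)
 * those two levels for a kernel virtual address.
 */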
int map_page(unsigned long va, phys_addr_t pa, int flags)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_offset(pgd_offset_k(va), va);
	/* Use middle 10 bits of VA to index the second-level map */
	pg = pte_alloc_kernel(pd, va); /* from powerpc - pgtable.c */
	/* pg = pte_alloc_kernel(&init_mm, pd, va); */
	if (pg != NULL) {
		err = 0;
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
				__pgprot(flags)));
		if (unlikely(mem_init_done))
			flush_HPTE(0, va, pmd_val(*pd));
			/* flush_HPTE(0, va, pg); */
	}
	return err;
}

/*
 * Map in all of physical memory starting at CONFIG_KERNEL_START.
 */
void __init mapin_ram(void)
{
	unsigned long v, p, s, f;

	v = CONFIG_KERNEL_START;
	p = memory_start;
	for (s = 0; s < memory_size; s += PAGE_SIZE) {
		f = _PAGE_PRESENT | _PAGE_ACCESSED |
				_PAGE_SHARED | _PAGE_HWEXEC;
		if ((char *) v < _stext || (char *) v >= _etext)
			f |= _PAGE_WRENABLE;
		else
			/* On the MicroBlaze, no user access
			   forces R/W kernel access */
			f |= _PAGE_USER;
		map_page(v, p, f);
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}
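
/*
 * The loop above establishes the kernel's linear mapping: for each
 * page of RAM, virtual CONFIG_KERNEL_START + off maps to physical
 * memory_start + off.  Only pages outside the kernel text are mapped
 * writable.
 */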

/* is x a power of 2? */
#define is_power_of_2(x)	((x) != 0 && (((x) & ((x) - 1)) == 0))
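/*
 * A power of two has a single bit set, so x & (x - 1) clears it to 0:
 * 8 (0b1000) & 7 (0b0111) == 0, while 6 (0b0110) & 5 (0b0101) == 0b0100.
 */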

/* Scan the real Linux page tables and return a PTE pointer for
 * a virtual address in a context.
 * Returns true (1) if PTE was found, zero otherwise. The pointer to
 * the PTE pointer is unmodified if PTE is not found.
 */
static int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int retval = 0;

	pgd = pgd_offset(mm, addr & PAGE_MASK);
	if (pgd) {
		pmd = pmd_offset(pgd, addr & PAGE_MASK);
		if (pmd_present(*pmd)) {
			pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
			if (pte) {
				retval = 1;
				*ptep = pte;
			}
		}
	}
	return retval;
}

/* Find physical address for this virtual address. Normally used by
 * I/O functions, but anyone can call it.
 */
unsigned long iopa(unsigned long addr)
{
	unsigned long pa;
	pte_t *pte;
	struct mm_struct *mm;

	/* Allow mapping of user addresses (within the thread)
	 * for DMA if necessary.
	 */
	if (addr < TASK_SIZE)
		mm = current->mm;
	else
		mm = &init_mm;

	pa = 0;
	if (get_pteptr(mm, addr, &pte))
		pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);

	return pa;
}
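
/*
 * Usage sketch (illustrative only, not part of the original file):
 * handing a device the physical address of a kernel buffer.  The
 * example_desc structure is made up.
 */
#if 0
struct example_desc {
	u32 phys;
	u32 len;
};

static void example_fill_desc(struct example_desc *d, void *buf, u32 len)
{
	d->phys = iopa((unsigned long)buf);	/* virt -> phys for the device */
	d->len = len;
}
#endif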

__init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
		unsigned long address)
{
	pte_t *pte;

	if (mem_init_done) {
		pte = (pte_t *)__get_free_page(GFP_KERNEL |
				__GFP_REPEAT | __GFP_ZERO);
	} else {
		pte = (pte_t *)early_get_page();
		if (pte)
			clear_page(pte);
	}
	return pte;
}