#ifndef _ASM_M32R_PGTABLE_H
#define _ASM_M32R_PGTABLE_H

#include <asm-generic/4level-fixup.h>

#ifdef __KERNEL__
/*
 * The Linux memory management assumes a three-level page table setup. On
 * the M32R, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the M32R page table tree.
 */

/* CAUTION!: If you change macro definitions in this file, you might have to
 * change arch/m32r/mmu.S manually.
 */

#ifndef __ASSEMBLY__

#include <linux/threads.h>
#include <linux/bitops.h>
#include <asm/processor.h>
#include <asm/addrspace.h>

struct vm_area_struct;

extern pgd_t swapper_pg_dir[1024];
extern void paging_init(void);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* !__ASSEMBLY__ */

#ifndef __ASSEMBLY__
#include <asm/pgtable-2level.h>
#endif

#define pgtable_cache_init() do { } while (0)

#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE - 1))
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE - 1))

#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0
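
/*
 * For illustration: with the two-level layout from <asm/pgtable-2level.h>
 * (assuming the usual 4KB pages, PGDIR_SHIFT == 22 and
 * PTRS_PER_PGD == PTRS_PER_PTE == 1024), a 32-bit virtual address splits
 * up as, e.g.:
 *
 *	addr = 0x0804a123
 *	pgd index   = addr >> 22		= 0x020
 *	pte index   = (addr >> 12) & 1023	= 0x04a
 *	page offset = addr & 0xfff		= 0x123
 *
 * USER_PTRS_PER_PGD is then just the number of 4MB pgd slots needed to
 * cover TASK_SIZE.
 */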

#ifndef __ASSEMBLY__
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_START KSEG2
#define VMALLOC_END KSEG3

/*
 *     M32R TLB format
 *
 *     [0]    [1:19]           [20:23]       [24:31]
 *     +-----------------------+----+-------------+
 *     |          VPN          |0000|    ASID     |
 *     +-----------------------+----+-------------+
 *     +-+---------------------+----+-+---+-+-+-+-+
 *     |0    PPN               |0000|N|AC |L|G|V| |
 *     +-+---------------------+----+-+---+-+-+-+-+
 *                                   RWX
 */

#define _PAGE_BIT_DIRTY 0 /* software: page changed */
#define _PAGE_BIT_FILE 0 /* when !present: nonlinear file
			    mapping */
#define _PAGE_BIT_PRESENT 1 /* Valid: page is valid */
#define _PAGE_BIT_GLOBAL 2 /* Global */
#define _PAGE_BIT_LARGE 3 /* Large */
#define _PAGE_BIT_EXEC 4 /* Execute */
#define _PAGE_BIT_WRITE 5 /* Write */
#define _PAGE_BIT_READ 6 /* Read */
#define _PAGE_BIT_NONCACHABLE 7 /* Non cachable */
#define _PAGE_BIT_ACCESSED 8 /* software: page referenced */
#define _PAGE_BIT_PROTNONE 9 /* software: if not present */

#define _PAGE_DIRTY (1UL << _PAGE_BIT_DIRTY)
#define _PAGE_FILE (1UL << _PAGE_BIT_FILE)
#define _PAGE_PRESENT (1UL << _PAGE_BIT_PRESENT)
#define _PAGE_GLOBAL (1UL << _PAGE_BIT_GLOBAL)
#define _PAGE_LARGE (1UL << _PAGE_BIT_LARGE)
#define _PAGE_EXEC (1UL << _PAGE_BIT_EXEC)
#define _PAGE_WRITE (1UL << _PAGE_BIT_WRITE)
#define _PAGE_READ (1UL << _PAGE_BIT_READ)
#define _PAGE_NONCACHABLE (1UL << _PAGE_BIT_NONCACHABLE)
#define _PAGE_ACCESSED (1UL << _PAGE_BIT_ACCESSED)
#define _PAGE_PROTNONE (1UL << _PAGE_BIT_PROTNONE)
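
/*
 * Worked example: a present, readable, writable and recently referenced
 * page would carry
 *
 *	_PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | _PAGE_ACCESSED
 *	    = 0x002   |    0x020    |   0x040    |    0x100
 *	    = 0x162
 *
 * in its pte; these are the software-defined bits that the TLB handling
 * code in arch/m32r/mmu.S depends on (see the CAUTION note above).
 */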

#define _PAGE_TABLE \
	( _PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | _PAGE_ACCESSED \
	| _PAGE_DIRTY )
#define _KERNPG_TABLE \
	( _PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | _PAGE_ACCESSED \
	| _PAGE_DIRTY )
#define _PAGE_CHG_MASK \
	( PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY )

#ifdef CONFIG_MMU
#define PAGE_NONE \
	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED \
	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | _PAGE_ACCESSED)
#define PAGE_SHARED_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_WRITE | _PAGE_READ \
		| _PAGE_ACCESSED)
#define PAGE_COPY \
	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_ACCESSED)
#define PAGE_COPY_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_ACCESSED)
#define PAGE_READONLY \
	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_ACCESSED)
#define PAGE_READONLY_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_ACCESSED)

#define __PAGE_KERNEL \
	( _PAGE_PRESENT | _PAGE_EXEC | _PAGE_WRITE | _PAGE_READ | _PAGE_DIRTY \
	| _PAGE_ACCESSED )
#define __PAGE_KERNEL_RO ( __PAGE_KERNEL & ~_PAGE_WRITE )
#define __PAGE_KERNEL_NOCACHE ( __PAGE_KERNEL | _PAGE_NONCACHABLE)

#define MAKE_GLOBAL(x) __pgprot((x) | _PAGE_GLOBAL)

#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL)
#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)

#else

#define PAGE_NONE __pgprot(0)
#define PAGE_SHARED __pgprot(0)
#define PAGE_SHARED_EXEC __pgprot(0)
#define PAGE_COPY __pgprot(0)
#define PAGE_COPY_EXEC __pgprot(0)
#define PAGE_READONLY __pgprot(0)
#define PAGE_READONLY_EXEC __pgprot(0)

#define PAGE_KERNEL __pgprot(0)
#define PAGE_KERNEL_RO __pgprot(0)
#define PAGE_KERNEL_NOCACHE __pgprot(0)

#endif /* CONFIG_MMU */

#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_READONLY_EXEC
#define __P101 PAGE_READONLY_EXEC
#define __P110 PAGE_COPY_EXEC
#define __P111 PAGE_COPY_EXEC

#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_READONLY_EXEC
#define __S101 PAGE_READONLY_EXEC
#define __S110 PAGE_SHARED_EXEC
#define __S111 PAGE_SHARED_EXEC
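
/*
 * For illustration: these __Pxxx/__Sxxx values populate the generic
 * protection_map[], indexed by the mmap() PROT_EXEC/PROT_WRITE/PROT_READ
 * bits ("xwr") plus a private/shared selector.  E.g. a MAP_PRIVATE
 * mapping asking for PROT_READ|PROT_WRITE gets __P011 == PAGE_COPY, a
 * read-only pte whose first write faults and is satisfied by
 * copy-on-write, whereas the MAP_SHARED equivalent gets
 * __S011 == PAGE_SHARED with _PAGE_WRITE set.
 */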

/* page table for 0-4MB for everybody */

#define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)

#define pmd_none(x) (!pmd_val(x))
#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
#define pmd_bad(x) ((pmd_val(x) & ~PAGE_MASK) != _KERNPG_TABLE)

#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT))
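
/*
 * For illustration: with the usual PAGE_SHIFT of 12 (4KB pages),
 * pages_to_mb() shifts right by 20 - 12 = 8, so e.g.
 * pages_to_mb(2560) == 2560 >> 8 == 10, i.e. 2560 pages are 10MB.
 */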

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}

/*
 * The following only works if pte_present() is not true.
 */
static inline int pte_file(pte_t pte)
{
	return pte_val(pte) & _PAGE_FILE;
}

static inline int pte_special(pte_t pte)
{
	return 0;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte;
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	return test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep);
}

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_WRITE, ptep);
}
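
/*
 * For illustration: both helpers above poke the referenced/writable bits
 * directly in the pte word with atomic bitops.  Page aging in the generic
 * rmap code does roughly
 *
 *	if (ptep_test_and_clear_young(vma, address, ptep))
 *		referenced++;	(page was used since the last scan)
 *
 * which is why an atomic test_and_clear_bit() is used here instead of
 * rewriting the whole pte.
 */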

/*
 * Macro and implementation to mark a page protection value as uncachable.
 */
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot |= _PAGE_NONCACHABLE;
	return __pgprot(prot);
}

#define pgprot_writecombine(prot) pgprot_noncached(prot)
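
/*
 * For illustration: a driver mmap() handler that wants its pages mapped
 * uncached would typically do something like
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *				  vma->vm_end - vma->vm_start,
 *				  vma->vm_page_prot);
 *
 * (pfn being whatever physical page frame the driver exports).  There is
 * no separate write-combining mode here, so pgprot_writecombine() just
 * falls back to the uncached variant.
 */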

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), pgprot)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) \
		| pgprot_val(newprot)));

	return pte;
}
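
/*
 * For illustration: generic fault-handling code builds ptes by combining
 * mk_pte() with the pte_mk*() helpers above, roughly
 *
 *	pte_t entry = mk_pte(page, vma->vm_page_prot);
 *	if (write_access)
 *		entry = pte_mkwrite(pte_mkdirty(entry));
 *	set_pte_at(vma->vm_mm, address, ptep, entry);
 *
 * with page, vma, write_access, address and ptep supplied by the caller.
 */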

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

static inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{
	pmd_val(*pmdp) = (((unsigned long) ptep) & PAGE_MASK);
}

#define pmd_page_vaddr(pmd) \
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

#ifndef CONFIG_DISCONTIGMEM
#define pmd_page(pmd) (mem_map + ((pmd_val(pmd) >> PAGE_SHIFT) - PFN_BASE))
#endif /* !CONFIG_DISCONTIGMEM */

/* to find an entry in a page-table-directory. */
#define pgd_index(address) \
	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define pmd_index(address) \
	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

#define pte_index(address) \
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
	((pte_t *)pmd_page_vaddr(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_unmap(pte) do { } while (0)
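
/*
 * For illustration: a software walk to the pte mapping a kernel virtual
 * address.  The pud and pmd levels are folded away on M32R (see
 * <asm-generic/4level-fixup.h> and <asm/pgtable-2level.h>), so the
 * intermediate steps cost nothing:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte;
 *
 *	if (!pmd_none(*pmd) && !pmd_bad(*pmd))
 *		pte = pte_offset_kernel(pmd, addr);
 */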

/* Encode and de-code a swap entry */
#define __swp_type(x) (((x).val >> 2) & 0x1f)
#define __swp_offset(x) ((x).val >> 10)
#define __swp_entry(type, offset) \
	((swp_entry_t) { ((type) << 2) | ((offset) << 10) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
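
/*
 * For illustration: a swap pte keeps bits 0 and 1 clear, so it can never
 * be mistaken for a present pte (_PAGE_PRESENT is bit 1) or a file pte
 * (_PAGE_FILE is bit 0); the swap type sits in the five bits [6:2] and
 * the swap offset in bits [31:10], leaving the software bits up to
 * _PAGE_PROTNONE (bit 9) clear as well.  E.g.:
 *
 *	swp_entry_t e = __swp_entry(3, 0x1234);
 *	e.val == (3 << 2) | (0x1234 << 10) == 0x0048d00c
 *	__swp_type(e) == 3, __swp_offset(e) == 0x1234
 */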

#endif /* !__ASSEMBLY__ */

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr) (1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
	remap_pfn_range(vma, vaddr, pfn, size, prot)

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>

#endif /* __KERNEL__ */

#endif /* _ASM_M32R_PGTABLE_H */