#ifndef _ASM_IA64_TLB_H
#define _ASM_IA64_TLB_H
/*
 * Based on <asm-generic/tlb.h>.
 *
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
/*
 * Removing a translation from a page table (including TLB-shootdown) is a four-step
 * procedure:
 *
 *	(1) Flush (virtual) caches --- ensures virtual memory is coherent with kernel memory
 *	    (this is a no-op on ia64).
 *	(2) Clear the relevant portions of the page-table.
 *	(3) Flush the TLBs --- ensures that stale content is gone from CPU TLBs.
 *	(4) Release the pages that were freed up in step (2).
 *
 * Note that the ordering of these steps is crucial to avoid races on MP machines.
 *
 * The Linux kernel defines several platform-specific hooks for TLB-shootdown.  When
 * unmapping a portion of the virtual address space, these hooks are called according to
 * the following template:
 *
 *	tlb <- tlb_gather_mmu(mm, full_mm_flush);	// start unmap for address space MM
 *	{
 *	  for each vma that needs a shootdown do {
 *	    tlb_start_vma(tlb, vma);
 *	      for each page-table-entry PTE that needs to be removed do {
 *		tlb_remove_tlb_entry(tlb, pte, address);
 *		if (pte refers to a normal page) {
 *		  tlb_remove_page(tlb, page);
 *		}
 *	      }
 *	    tlb_end_vma(tlb, vma);
 *	  }
 *	}
 *	tlb_finish_mmu(tlb, start, end);	// finish unmap for address space MM
 */
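/*
 * For illustration only (not part of the original header): a minimal sketch
 * of the template above for a single vma, assuming the caller holds the
 * usual mm locks.  example_shootdown() and example_lookup_pte() are
 * hypothetical names; everything else is declared in this file or in the
 * generic page-table headers.
 */
#if 0	/* sketch, not compiled */
static void
example_shootdown (struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);	/* 0 => not a full-mm flush */
	unsigned long addr;

	tlb_start_vma(tlb, vma);
	for (addr = start; addr < end; addr += PAGE_SIZE) {
		pte_t *ptep = example_lookup_pte(mm, addr);	/* hypothetical PTE walk */
		pte_t pte;

		if (!ptep || pte_none(*ptep))
			continue;
		pte = ptep_get_and_clear(mm, addr, ptep);	/* step (2): clear the PTE */
		tlb_remove_tlb_entry(tlb, ptep, addr);		/* step (3): note the range */
		if (pte_present(pte))
			tlb_remove_page(tlb, pte_page(pte));	/* step (4): deferred free */
	}
	tlb_end_vma(tlb, vma);
	tlb_finish_mmu(tlb, start, end);	/* flush TLBs, then release the pages */
}
#endif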
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/machvec.h>
#ifdef CONFIG_SMP
# define FREE_PTE_NR		2048
# define tlb_fast_mode(tlb)	((tlb)->nr == ~0U)
#else
# define FREE_PTE_NR		0
# define tlb_fast_mode(tlb)	(1)
#endif
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		nr;		/* == ~0U => fast mode */
	unsigned char		fullmm;		/* non-zero means full mm flush */
	unsigned char		need_flush;	/* really unmapped some PTEs? */
	unsigned long		start_addr;
	unsigned long		end_addr;
	struct page		*pages[FREE_PTE_NR];
};

struct ia64_tr_entry {
	u64 ifa;
	u64 itir;
	u64 pte;
	u64 rr;
};	/* record for a translation-register (TR) entry */
extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
extern void ia64_ptr_entry(u64 target_mask, int slot);

extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
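/*
 * For illustration only: a sketch of pinning a translation with the TR
 * helpers above.  The target-mask values (0x1 = instruction TR, 0x2 = data
 * TR) and the negative-on-failure return convention are assumptions about
 * ia64_itr_entry(); verify against its definition before relying on them.
 */
#if 0	/* sketch, not compiled */
static int
example_pin_region (u64 va, u64 pte)
{
	int slot;

	slot = ia64_itr_entry(0x2, va, pte, 24);	/* 2^24 = 16MB data mapping */
	if (slot < 0)
		return slot;		/* no free translation register */
	/* ... the mapping is now pinned and safe from TLB replacement ... */
	ia64_ptr_entry(0x2, slot);	/* purge it when done */
	return 0;
}
#endif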
/*
 * region register macros
 */
#define RR_TO_VE(val)	(((val) >> 0) & 0x0000000000000001)
#define RR_VE(val)	(((val) & 0x0000000000000001) << 0)
#define RR_VE_MASK	0x0000000000000001L
#define RR_VE_SHIFT	0
#define RR_TO_PS(val)	(((val) >> 2) & 0x000000000000003f)
#define RR_PS(val)	(((val) & 0x000000000000003f) << 2)
#define RR_PS_MASK	0x00000000000000fcL
#define RR_PS_SHIFT	2
#define RR_RID_MASK	0x00000000ffffff00L
#define RR_TO_RID(val)	(((val) >> 8) & 0xffffff)
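/*
 * For illustration only: how the region-register accessors above fit
 * together, assuming ia64_get_rr() from <asm/intrinsics.h>.  The variable
 * names are illustrative.
 */
#if 0	/* sketch, not compiled */
static void
example_decode_rr (unsigned long va)
{
	u64 rr  = ia64_get_rr(va);	/* raw region-register value */
	u64 rid = RR_TO_RID(rr);	/* 24-bit region ID */
	u64 ps  = RR_TO_PS(rr);		/* log2 of the preferred page size */
	u64 ve  = RR_TO_VE(rr);		/* VHPT walker enable bit */

	/* reassemble: keep the RID bits in place, re-encode page size and VE */
	u64 rebuilt = (rr & RR_RID_MASK) | RR_PS(ps) | RR_VE(ve);
	(void) rebuilt;
}
#endif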
/* Users of the generic TLB shootdown code must declare this storage space. */
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
/*
 * Flush the TLB for address range START to END and, if not in fast mode, release the
 * freed pages that were gathered up to this point.
 */
static inline void
ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	unsigned int nr;

	if (!tlb->need_flush)
		return;
	tlb->need_flush = 0;

	if (tlb->fullmm) {
		/*
		 * Tearing down the entire address space.  This happens both as a result
		 * of exit() and execve().  The latter case necessitates the call to
		 * flush_tlb_mm() here.
		 */
		flush_tlb_mm(tlb->mm);
	} else if (unlikely (end - start >= 1024*1024*1024*1024UL
			     || REGION_NUMBER(start) != REGION_NUMBER(end - 1)))
	{
		/*
		 * If we flush more than a tera-byte or across regions, we're probably
		 * better off just flushing the entire TLB(s).  This should be very rare
		 * and is not worth optimizing for.
		 */
		flush_tlb_all();
	} else {
		/*
		 * XXX fix me: flush_tlb_range() should take an mm pointer instead of a
		 * vma pointer.
		 */
		struct vm_area_struct vma;

		vma.vm_mm = tlb->mm;
		/* flush the address range from the tlb: */
		flush_tlb_range(&vma, start, end);
		/* now flush the virt. page-table area mapping the address range: */
		flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
	}

	/* lastly, release the freed pages */
	nr = tlb->nr;
	if (!tlb_fast_mode(tlb)) {
		unsigned long i;
		tlb->nr = 0;
		tlb->start_addr = ~0UL;
		for (i = 0; i < nr; ++i)
			free_page_and_swap_cache(tlb->pages[i]);
	}
}
/*
 * Return a pointer to an initialized struct mmu_gather.
 */
static inline struct mmu_gather *
tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
{
	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

	tlb->mm = mm;
	/*
	 * Use fast mode if only 1 CPU is online.
	 *
	 * It would be tempting to turn on fast-mode for full_mm_flush as well.  But this
	 * doesn't work because of speculative accesses and software prefetching: the page
	 * table of "mm" may be (and usually is) the currently active page table and even
	 * though the kernel won't do any user-space accesses during the TLB shoot down, a
	 * compiler might use speculation or lfetch.fault on what happens to be a valid
	 * user-space address.  This in turn could trigger a TLB miss fault (or a VHPT
	 * walk) and re-insert a TLB entry we just removed.  Slow mode avoids such
	 * problems.  (We could make fast-mode work by switching the current task to a
	 * different "mm" during the shootdown.) --davidm 08/02/2002
	 */
	tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
	tlb->fullmm = full_mm_flush;
	tlb->start_addr = ~0UL;
	return tlb;
}
/*
 * Called at the end of the shootdown operation to free up any resources that were
 * collected.
 */
static inline void
tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	/*
	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
	 * tlb->end_addr.
	 */
	ia64_tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	put_cpu_var(mmu_gathers);
}
/*
 * Logically, this routine frees PAGE.  On MP machines, the actual freeing of the page
 * must be delayed until after the TLB has been flushed (see comments at the beginning of
 * this file).
 */
static inline void
tlb_remove_page (struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;

	if (tlb_fast_mode(tlb)) {
		free_page_and_swap_cache(page);
		return;
	}
	tlb->pages[tlb->nr++] = page;
	if (tlb->nr >= FREE_PTE_NR)
		ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
}
/*
 * Remove TLB entry for PTE mapped at virtual address ADDRESS.  This is called for any
 * PTE, not just those pointing to (normal) physical memory.
 */
static inline void
__tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
	if (tlb->start_addr == ~0UL)
		tlb->start_addr = address;
	tlb->end_addr = address + PAGE_SIZE;
}
#define tlb_migrate_finish(mm)	platform_tlb_migrate_finish(mm)

#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)

#define tlb_remove_tlb_entry(tlb, ptep, addr)		\
do {							\
	tlb->need_flush = 1;				\
	__tlb_remove_tlb_entry(tlb, ptep, addr);	\
} while (0)

#define pte_free_tlb(tlb, ptep, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pte_free_tlb(tlb, ptep, address);		\
} while (0)

#define pmd_free_tlb(tlb, pmdp, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pmd_free_tlb(tlb, pmdp, address);		\
} while (0)

#define pud_free_tlb(tlb, pudp, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pud_free_tlb(tlb, pudp, address);		\
} while (0)

#endif /* _ASM_IA64_TLB_H */