#ifndef _ASM_IA64_MMU_CONTEXT_H
#define _ASM_IA64_MMU_CONTEXT_H

/*
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

/*
 * Routines to manage the allocation of task context numbers.  Task context
 * numbers are used to reduce or eliminate the need to perform TLB flushes
 * due to context switches.  Context numbers are implemented using ia-64
 * region ids.  Since the IA-64 TLB does not consider the region number when
 * performing a TLB lookup, we need to assign a unique region id to each
 * region in a process.  We use the least significant three bits in a
 * region id for this purpose.
 */

#define IA64_REGION_ID_KERNEL	0 /* the kernel's region id (tlb.c depends on this being 0) */

#define ia64_rid(ctx,addr)	(((ctx) << 3) | (addr >> 61))
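
/*
 * For illustration (the values here are made up): a task whose context
 * number is 5 touching an address in region 2 (addr >> 61 == 2, e.g.
 * 0x4000000000000000) runs with region id (5 << 3) | 2 == 42.  Each of
 * the eight 2^61-byte regions of the task's address space thus gets its
 * own region id, all derived from the same context number.
 */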

# include <asm/page.h>
# ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <asm/processor.h>
#include <asm-generic/mm_hooks.h>

struct ia64_ctx {
	spinlock_t lock;
	unsigned int next;	/* next context number to use */
	unsigned int limit;	/* available free range */
	unsigned int max_ctx;	/* max. context value supported by all CPUs */
				/* call wrap_mmu_context when next >= max */
	unsigned long *bitmap;	/* bitmap size is max_ctx+1 */
	unsigned long *flushmap;/* pending rid to be flushed */
};
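
/*
 * Rough sketch of how these fields cooperate: context numbers are handed
 * out sequentially from the free range [next, limit).  When "next"
 * catches up with "limit", get_mmu_context() scans "bitmap" for the next
 * free range; once the space up to "max_ctx" is exhausted, region ids
 * must be recycled via wrap_mmu_context(), and "flushmap" tracks the
 * recycled rids whose stale translations still need flushing.
 */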

extern struct ia64_ctx ia64_ctx;
DECLARE_PER_CPU(u8, ia64_need_tlb_flush);

extern void mmu_context_init (void);
extern void wrap_mmu_context (struct mm_struct *mm);

static inline void
enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * When the context counter wraps around all TLBs need to be flushed because
 * an old context number might have been reused. This is signalled by the
 * ia64_need_tlb_flush per-CPU variable, which is checked in the routine
 * below. Called by activate_mm(). <efocht@ess.nec.de>
 */
static inline void
delayed_tlb_flush (void)
{
	extern void local_flush_tlb_all (void);
	unsigned long flags;

	if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
		spin_lock_irqsave(&ia64_ctx.lock, flags);
		if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
			local_flush_tlb_all();
			__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
		}
		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
	}
}
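
/*
 * Note that delayed_tlb_flush() checks ia64_need_tlb_flush twice: once
 * without the lock, to keep the common no-flush case cheap, and again
 * under ia64_ctx.lock, which wrap_mmu_context() holds while setting the
 * flag, so the test-and-clear cannot race with a concurrent wrap.
 */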

static inline nv_mm_context_t
get_mmu_context (struct mm_struct *mm)
{
	unsigned long flags;
	nv_mm_context_t context = mm->context;

	if (likely(context))
		goto out;

	spin_lock_irqsave(&ia64_ctx.lock, flags);
	/* re-check, now that we've got the lock: */
	context = mm->context;
	if (context == 0) {
		cpumask_clear(mm_cpumask(mm));
		if (ia64_ctx.next >= ia64_ctx.limit) {
			/* scan forward for the next free range of context numbers: */
			ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
					ia64_ctx.max_ctx, ia64_ctx.next);
			ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
					ia64_ctx.max_ctx, ia64_ctx.next);
			if (ia64_ctx.next >= ia64_ctx.max_ctx)
				wrap_mmu_context(mm);
		}
		mm->context = context = ia64_ctx.next++;
		__set_bit(context, ia64_ctx.bitmap);
	}
	spin_unlock_irqrestore(&ia64_ctx.lock, flags);
out:
	/*
	 * Ensure we're not starting to use "context" before any old
	 * uses of it are gone from our TLB.
	 */
	delayed_tlb_flush();

	return context;
}
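
/*
 * Example of the free-range scan above, with made-up bitmap contents:
 * if bits 0-3 and bit 6 of ia64_ctx.bitmap are set and next == limit == 4,
 * find_next_zero_bit() leaves "next" at 4 (the first free context) and
 * find_next_bit() sets "limit" to 6, so contexts 4 and 5 can then be
 * handed out by "ia64_ctx.next++" alone, without rescanning the bitmap.
 */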

/*
 * Initialize context number to some sane value.  MM is guaranteed to be a
 * brand-new address-space, so no TLB flushing is needed, ever.
 */
static inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
{
	mm->context = 0;
	return 0;
}

static inline void
destroy_context (struct mm_struct *mm)
{
	/* Nothing to do.  */
}

static inline void
reload_context (nv_mm_context_t context)
{
	unsigned long rid;
	unsigned long rid_incr = 0;
	unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4;

	old_rr4 = ia64_get_rr(RGN_BASE(RGN_HPAGE));
	rid = context << 3;	/* make space for encoding the region number */
	rid_incr = 1 << 8;	/* the rid field starts at bit 8 of a region register */

	/* encode the region id, preferred page size, and VHPT enable bit: */
	rr0 = (rid << 8) | (PAGE_SHIFT << 2) | 1;
	rr1 = rr0 + 1*rid_incr;
	rr2 = rr0 + 2*rid_incr;
	rr3 = rr0 + 3*rid_incr;
	rr4 = rr0 + 4*rid_incr;
#ifdef  CONFIG_HUGETLB_PAGE
	/* preserve the previously configured huge-page size (ps, bits 2-7) in rr4: */
	rr4 = (rr4 & (~(0xfcUL))) | (old_rr4 & 0xfc);

#  if RGN_HPAGE != 4
#    error "reload_context assumes RGN_HPAGE is 4"
#  endif
#endif

	ia64_set_rr0_to_rr4(rr0, rr1, rr2, rr3, rr4);
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}
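
/*
 * Worked example of the encoding above (assuming the usual Itanium region
 * register layout: "ve" at bit 0, "ps" in bits 2-7, "rid" from bit 8 up):
 * for context 1 with 16KB pages (PAGE_SHIFT == 14),
 *	rr0 = ((1 << 3) << 8) | (14 << 2) | 1 == 0x839,
 * and each rid_incr step of 1 << 8 bumps the rid field by one, so region
 * n ends up with rid (1 << 3) + n, exactly what ia64_rid() computes.
 */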

/*
 * Must be called with preemption off
 */
static inline void
activate_context (struct mm_struct *mm)
{
	nv_mm_context_t context;

	do {
		context = get_mmu_context(mm);
		if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
			cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
		reload_context(context);
		/*
		 * in the unlikely event of a TLB-flush by another thread,
		 * redo the load.
		 */
	} while (unlikely(context != mm->context));
}

#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * Switch from address space PREV to address space NEXT.
 */
static inline void
activate_mm (struct mm_struct *prev, struct mm_struct *next)
{
	/*
	 * We may get interrupts here, but that's OK because interrupt
	 * handlers cannot touch user-space.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
	activate_context(next);
}
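
/*
 * Ordering note: activate_mm() publishes the new page-table base
 * (IA64_KR_PT_BASE, which the TLB-miss handlers presumably consult)
 * before activate_context() installs the new region ids, so a miss taken
 * under the new rids should already be resolved from next->pgd.
 */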

#define switch_mm(prev_mm,next_mm,next_task)	activate_mm(prev_mm, next_mm)

# endif /* ! __ASSEMBLY__ */
#endif /* _ASM_IA64_MMU_CONTEXT_H */