#include "mmu_decl.h"
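
/* Per-CPU mmu_gather used by the generic TLB gather code */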
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

#ifdef CONFIG_SMP

/*
 * Handle batching of page table freeing on SMP. Page tables are
 * queued up and sent to be freed later by RCU in order to avoid
 * freeing a page table page that is being walked without locks.
 */

static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
static unsigned long pte_freelist_forced_free;

struct pte_freelist_batch
{
	struct rcu_head	rcu;		/* queued via call_rcu_sched() */
	unsigned int	index;		/* number of entries used in tables[] */
	unsigned long	tables[0];	/* encoded table pointers, see below */
};

#define PTE_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
	  / sizeof(unsigned long))
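
/*
 * A batch occupies exactly one page. As a worked example (assuming a
 * 64-bit kernel with 4K pages): the header is 24 bytes (16-byte
 * rcu_head plus the index, padded to 8-byte alignment), so
 * PTE_FREELIST_SIZE comes to (4096 - 24) / 8 = 509 entries per batch.
 */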

static void pte_free_smp_sync(void *arg)
{
	/* Do nothing, just ensure we sync with all CPUs */
}

/* This is only called when we are critically out of memory
 * (and fail to get a page in pte_free_tlb).
 */
static void pgtable_free_now(void *table, unsigned shift)
{
	pte_freelist_forced_free++;

	/* Run the empty function on every other CPU and wait, so no
	 * CPU can still be walking the table we are about to free. */
	smp_call_function(pte_free_smp_sync, NULL, 1);

	pgtable_free(table, shift);
}
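
/*
 * Each tables[] entry packs a table pointer together with its index
 * size: this relies on page-table pages being aligned so that the low
 * MAX_PGTABLE_INDEX_SIZE bits of their address are zero, leaving them
 * free to carry the "shift" that pgtable_free() needs. The masking
 * below recovers both halves.
 */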
static void pte_free_rcu_callback(struct rcu_head *head)
{
	struct pte_freelist_batch *batch =
		container_of(head, struct pte_freelist_batch, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++) {
		void *table = (void *)(batch->tables[i] & ~MAX_PGTABLE_INDEX_SIZE);
		unsigned shift = batch->tables[i] & MAX_PGTABLE_INDEX_SIZE;

		pgtable_free(table, shift);
	}

	free_page((unsigned long)batch);
}
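
/*
 * Freeing is deferred by an RCU-sched grace period: once every CPU
 * has passed through a quiescent state, no lockless walker (which
 * runs with interrupts or preemption disabled) can still be looking
 * at the queued tables.
 */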
static void pte_free_submit(struct pte_freelist_batch *batch)
{
	call_rcu_sched(&batch->rcu, pte_free_rcu_callback);
}

void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
	unsigned long pgf;

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpumask_equal(mm_cpumask(tlb->mm), cpumask_of(smp_processor_id()))) {
		/* Only one user of the mm, or the mm has only ever run
		 * on this CPU: no lockless walker to worry about, so
		 * free the table immediately. */
		pgtable_free(table, shift);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
		if (*batchp == NULL) {
			/* No memory for a batch page: fall back to the
			 * synchronous IPI path. */
			pgtable_free_now(table, shift);
			return;
		}
		(*batchp)->index = 0;
	}
	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
	pgf = (unsigned long)table | shift;
	(*batchp)->tables[(*batchp)->index++] = pgf;
	if ((*batchp)->index == PTE_FREELIST_SIZE) {
		pte_free_submit(*batchp);
		*batchp = NULL;
	}
}
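
/*
 * Submit whatever partial batch the current CPU has accumulated; the
 * arch TLB-flush path is expected to call this when a gather completes.
 */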
void pte_free_finish(void)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (*batchp == NULL)
		return;

	pte_free_submit(*batchp);
	*batchp = NULL;
}

#endif /* CONFIG_SMP */
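
/*
 * 0x400 is the powerpc instruction storage interrupt vector, so a
 * trap value of 0x400 means the fault was taken on an instruction
 * fetch rather than a data access.
 */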
static inline int is_exec_fault(void)
{
	return current->thread.regs && TRAP(current->thread.regs) == 0x400;
}